author     Guy David <guyda96@gmail.com>  2025-08-06 21:05:48 +0300
committer  Guy David <guyda96@gmail.com>  2025-08-10 06:03:24 +0300
commit     c9251a2ee0c2c7ebef1b44ee8337b6c892e5a83e (patch)
tree       55938e2face6b00817bdce7ab645d1c1f2f94416
parent     cd834449a6d551cace6afad798ffad318f4ff325 (diff)
-rw-r--r--  llvm/lib/CodeGen/MachineSink.cpp | 24
-rw-r--r--  llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll | 6
-rw-r--r--  llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll | 278
-rw-r--r--  llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll | 278
-rw-r--r--  llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll | 278
-rw-r--r--  llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll | 278
-rw-r--r--  llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll | 31
-rw-r--r--  llvm/test/CodeGen/AArch64/machine-combiner-copy.ll | 14
-rw-r--r--  llvm/test/CodeGen/AArch64/typepromotion-cost.ll | 29
-rw-r--r--  llvm/test/CodeGen/AMDGPU/add.ll | 46
-rw-r--r--  llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll | 46
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll | 47989
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll | 2757
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll | 534
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll | 289
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll | 1393
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll | 534
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll | 3278
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll | 540
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll | 3962
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll | 1833
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll | 564
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll | 1439
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll | 1451
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll | 253
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll | 7057
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll | 2901
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll | 3329
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll | 2463
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll | 3523
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll | 5529
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll | 4504
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll | 6172
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll | 6978
-rw-r--r--  llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll | 1327
-rw-r--r--  llvm/test/CodeGen/AMDGPU/blender-no-live-segment-at-def-implicit-def.ll | 58
-rw-r--r--  llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll | 207
-rw-r--r--  llvm/test/CodeGen/AMDGPU/branch-relaxation.ll | 181
-rw-r--r--  llvm/test/CodeGen/AMDGPU/carryout-selection.ll | 543
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ctpop16.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/ctpop64.ll | 14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll | 29
-rw-r--r--  llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll | 68
-rw-r--r--  llvm/test/CodeGen/AMDGPU/exec-mask-opt-cannot-create-empty-or-backward-segment.ll | 11
-rw-r--r--  llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll | 240
-rw-r--r--  llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-phi-regression-issue130646-issue130119.ll | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll | 468
-rw-r--r--  llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll | 354
-rw-r--r--  llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll | 140
-rw-r--r--  llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll | 9
-rw-r--r--  llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll | 26
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mad-combine.ll | 50
-rw-r--r--  llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll | 10
-rw-r--r--  llvm/test/CodeGen/AMDGPU/mul.ll | 120
-rw-r--r--  llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll | 12
-rw-r--r--  llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll | 4
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sdiv64.ll | 36
-rw-r--r--  llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll | 14
-rw-r--r--  llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll | 51
-rw-r--r--  llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll | 1
-rw-r--r--  llvm/test/CodeGen/AMDGPU/srem.ll | 254
-rw-r--r--  llvm/test/CodeGen/AMDGPU/udiv64.ll | 118
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wave32.ll | 183
-rw-r--r--  llvm/test/CodeGen/AMDGPU/wqm.ll | 16
-rw-r--r--  llvm/test/CodeGen/AMDGPU/xor.ll | 32
-rw-r--r--  llvm/test/CodeGen/PowerPC/atomic-compare-exchange-weak.ll | 24
-rw-r--r--  llvm/test/CodeGen/PowerPC/atomic-float.ll | 70
-rw-r--r--  llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll | 466
-rw-r--r--  llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll | 462
-rw-r--r--  llvm/test/CodeGen/PowerPC/p10-spill-crun.ll | 100
-rw-r--r--  llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir | 338
-rw-r--r--  llvm/test/CodeGen/RISCV/combine-storetomstore.ll | 100
-rw-r--r--  llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll | 33
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll | 78
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll | 236
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll | 108
-rw-r--r--  llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll | 28
-rw-r--r--  llvm/test/CodeGen/RISCV/select-optimize-multiple.ll | 72
-rw-r--r--  llvm/test/CodeGen/RISCV/sextw-removal.ll | 40
-rw-r--r--  llvm/test/CodeGen/RISCV/simplify-condbr.ll | 16
-rw-r--r--  llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll | 19
-rw-r--r--  llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll | 87
-rw-r--r--  llvm/test/CodeGen/X86/bsf.ll | 44
-rw-r--r--  llvm/test/CodeGen/X86/bsr.ll | 73
-rw-r--r--  llvm/test/CodeGen/X86/switch-phi-const.ll | 60
85 files changed, 62619 insertions, 55027 deletions
diff --git a/llvm/lib/CodeGen/MachineSink.cpp b/llvm/lib/CodeGen/MachineSink.cpp
index 9ec5151..d13f107 100644
--- a/llvm/lib/CodeGen/MachineSink.cpp
+++ b/llvm/lib/CodeGen/MachineSink.cpp
@@ -835,6 +835,7 @@ bool MachineSinking::run(MachineFunction &MF) {
RegClassInfo.runOnMachineFunction(MF);
+ SmallSet<MachineBasicBlock *, 8> NewBlocks;
bool EverMadeChange = false;
while (true) {
@@ -854,6 +855,7 @@ bool MachineSinking::run(MachineFunction &MF) {
auto NewSucc = Pair.first->SplitCriticalEdge(
Pair.second, {LIS, SI, LV, MLI}, nullptr, &MDTU);
if (NewSucc != nullptr) {
+ NewBlocks.insert(NewSucc);
LLVM_DEBUG(dbgs() << " *** Splitting critical edge: "
<< printMBBReference(*Pair.first) << " -- "
<< printMBBReference(*NewSucc) << " -- "
@@ -873,6 +875,28 @@ bool MachineSinking::run(MachineFunction &MF) {
EverMadeChange = true;
}
+ for (MachineBasicBlock *MBB : NewBlocks) {
+ if (MBB->isReturnBlock() || MBB->getSingleSuccessor()->isReturnBlock())
+ continue;
+ // Only consider cheap instructions which don't touch physical registers.
+ if (all_of(llvm::make_range(MBB->begin(), MBB->getFirstTerminator()),
+ [this](const MachineInstr &MI) {
+ if (!MI.isAsCheapAsAMove())
+ return false;
+ return all_of(MI.operands(), [this](const MachineOperand &MO) {
+ return !MO.isReg() || MO.getReg().isVirtual() ||
+ TRI->isConstantPhysReg(MO.getReg());
+ });
+ })) {
+ assert(MBB->pred_size() == 1 &&
+ "Block must have exactly one predecessor");
+ assert(MBB->succ_size() == 1 && "Block must have exactly one successor");
+ MachineBasicBlock *Pred = *MBB->pred_begin();
+ Pred->splice(Pred->getFirstTerminator(), MBB, MBB->begin(),
+ MBB->getFirstTerminator());
+ }
+ }
+
if (SinkInstsIntoCycle) {
SmallVector<MachineCycle *, 8> Cycles(CI->toplevel_cycles());
SchedModel.init(STI);
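
The new loop above visits each block that SplitCriticalEdge created during this pass, skipping blocks that return or whose single successor returns. If everything before the block's terminator is rematerializable ("as cheap as a move") and references no physical register other than constant ones, the instructions are spliced back into the block's sole predecessor. A minimal sketch of that predicate in isolation, assuming the usual LLVM CodeGen headers (the helper name isFoldableSplitBlock is illustrative, not part of this patch):

#include "llvm/ADT/STLExtras.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"

using namespace llvm;

// True when every instruction before the terminator is as cheap as a move
// and touches no physical register except constant ones (e.g. zero
// registers), so hoisting the whole range into the lone predecessor is
// both safe and cheap.
static bool isFoldableSplitBlock(const MachineBasicBlock &MBB,
                                 const TargetRegisterInfo &TRI) {
  for (const MachineInstr &MI :
       make_range(MBB.begin(), MBB.getFirstTerminator())) {
    if (!MI.isAsCheapAsAMove())
      return false;
    for (const MachineOperand &MO : MI.operands())
      if (MO.isReg() && !MO.getReg().isVirtual() &&
          !TRI.isConstantPhysReg(MO.getReg()))
        return false;
  }
  return true;
}

When the predicate holds, the patch moves the range with Pred->splice(Pred->getFirstTerminator(), MBB, MBB->begin(), MBB->getFirstTerminator()). The asserts can require exactly one predecessor and one successor because the block was just produced by a critical-edge split, so its CFG shape is known. The AArch64 atomicrmw diffs below show the effect: the "mov w8, #1" style constants that used to occupy a separate fall-through block are folded into the cmpxchg trystore block, renumbering the loop labels.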
diff --git a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
index 8655bb1..077c35e 100644
--- a/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
+++ b/llvm/test/CodeGen/AArch64/aarch64-matrix-umull-smull.ll
@@ -583,8 +583,8 @@ define i16 @red_mla_dup_ext_u8_s8_s16(ptr noalias nocapture noundef readonly %A,
; CHECK-SD-NEXT: mov w10, w2
; CHECK-SD-NEXT: b.hi .LBB5_4
; CHECK-SD-NEXT: // %bb.2:
-; CHECK-SD-NEXT: mov x11, xzr
; CHECK-SD-NEXT: mov w8, wzr
+; CHECK-SD-NEXT: mov x11, xzr
; CHECK-SD-NEXT: b .LBB5_7
; CHECK-SD-NEXT: .LBB5_3:
; CHECK-SD-NEXT: mov w8, wzr
@@ -631,12 +631,12 @@ define i16 @red_mla_dup_ext_u8_s8_s16(ptr noalias nocapture noundef readonly %A,
; CHECK-GI-NEXT: cbz w2, .LBB5_3
; CHECK-GI-NEXT: // %bb.1: // %for.body.preheader
; CHECK-GI-NEXT: cmp w2, #16
+; CHECK-GI-NEXT: mov w9, #0 // =0x0
; CHECK-GI-NEXT: mov w8, w2
; CHECK-GI-NEXT: b.hs .LBB5_4
; CHECK-GI-NEXT: // %bb.2:
-; CHECK-GI-NEXT: mov w10, #0 // =0x0
+; CHECK-GI-NEXT: fmov s0, w9
; CHECK-GI-NEXT: mov x9, xzr
-; CHECK-GI-NEXT: fmov s0, w10
; CHECK-GI-NEXT: b .LBB5_8
; CHECK-GI-NEXT: .LBB5_3:
; CHECK-GI-NEXT: mov w0, wzr
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
index 21729b9..5a57f2f 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fadd.ll
@@ -49,15 +49,17 @@ define half @test_atomicrmw_fadd_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB0_2
+; SOFTFP-NOLSE-NEXT: b .LBB0_3
; SOFTFP-NOLSE-NEXT: .LBB0_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB0_6
-; SOFTFP-NOLSE-NEXT: .LBB0_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB0_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_6
+; SOFTFP-NOLSE-NEXT: .LBB0_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB0_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB0_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -68,19 +70,18 @@ define half @test_atomicrmw_fadd_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl __addsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB0_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB0_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB0_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB0_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB0_4
+; SOFTFP-NOLSE-NEXT: b .LBB0_2
; SOFTFP-NOLSE-NEXT: .LBB0_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -137,15 +138,17 @@ define half @test_atomicrmw_fadd_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB1_2
+; SOFTFP-NOLSE-NEXT: b .LBB1_3
; SOFTFP-NOLSE-NEXT: .LBB1_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB1_6
-; SOFTFP-NOLSE-NEXT: .LBB1_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB1_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_6
+; SOFTFP-NOLSE-NEXT: .LBB1_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB1_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB1_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -156,19 +159,18 @@ define half @test_atomicrmw_fadd_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl __addsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB1_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB1_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB1_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB1_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB1_4
+; SOFTFP-NOLSE-NEXT: b .LBB1_2
; SOFTFP-NOLSE-NEXT: .LBB1_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -236,34 +238,35 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB2_2
+; SOFTFP-NOLSE-NEXT: b .LBB2_3
; SOFTFP-NOLSE-NEXT: .LBB2_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB2_6
-; SOFTFP-NOLSE-NEXT: .LBB2_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB2_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_6
+; SOFTFP-NOLSE-NEXT: .LBB2_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB2_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB2_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl __addsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB2_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB2_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB2_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB2_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB2_4
+; SOFTFP-NOLSE-NEXT: b .LBB2_2
; SOFTFP-NOLSE-NEXT: .LBB2_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -330,34 +333,35 @@ define bfloat @test_atomicrmw_fadd_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB3_2
+; SOFTFP-NOLSE-NEXT: b .LBB3_3
; SOFTFP-NOLSE-NEXT: .LBB3_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB3_6
-; SOFTFP-NOLSE-NEXT: .LBB3_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB3_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_6
+; SOFTFP-NOLSE-NEXT: .LBB3_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB3_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB3_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl __addsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB3_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB3_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB3_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB3_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB3_4
+; SOFTFP-NOLSE-NEXT: b .LBB3_2
; SOFTFP-NOLSE-NEXT: .LBB3_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -406,32 +410,33 @@ define float @test_atomicrmw_fadd_f32_seq_cst_align4(ptr %ptr, float %value) #0
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
-; SOFTFP-NOLSE-NEXT: b .LBB4_2
+; SOFTFP-NOLSE-NEXT: b .LBB4_3
; SOFTFP-NOLSE-NEXT: .LBB4_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB4_6
-; SOFTFP-NOLSE-NEXT: .LBB4_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB4_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_6
+; SOFTFP-NOLSE-NEXT: .LBB4_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB4_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB4_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: bl __addsf3
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB4_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB4_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21
; SOFTFP-NOLSE-NEXT: b.ne .LBB4_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB4_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB4_4
+; SOFTFP-NOLSE-NEXT: b .LBB4_2
; SOFTFP-NOLSE-NEXT: .LBB4_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -480,32 +485,33 @@ define double @test_atomicrmw_fadd_f32_seq_cst_align8(ptr %ptr, double %value) #
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr x0, [x0]
; SOFTFP-NOLSE-NEXT: mov x20, x1
-; SOFTFP-NOLSE-NEXT: b .LBB5_2
+; SOFTFP-NOLSE-NEXT: b .LBB5_3
; SOFTFP-NOLSE-NEXT: .LBB5_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB5_6
-; SOFTFP-NOLSE-NEXT: .LBB5_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB5_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_6
+; SOFTFP-NOLSE-NEXT: .LBB5_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB5_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB5_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov x1, x20
; SOFTFP-NOLSE-NEXT: mov x21, x0
; SOFTFP-NOLSE-NEXT: bl __adddf3
; SOFTFP-NOLSE-NEXT: mov x8, x0
-; SOFTFP-NOLSE-NEXT: .LBB5_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB5_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x0, [x19]
; SOFTFP-NOLSE-NEXT: cmp x0, x21
; SOFTFP-NOLSE-NEXT: b.ne .LBB5_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, x8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB5_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB5_4
+; SOFTFP-NOLSE-NEXT: b .LBB5_2
; SOFTFP-NOLSE-NEXT: .LBB5_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
@@ -701,16 +707,18 @@ define <2 x half> @test_atomicrmw_fadd_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB7_2
+; SOFTFP-NOLSE-NEXT: b .LBB7_3
; SOFTFP-NOLSE-NEXT: .LBB7_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB7_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB7_6
-; SOFTFP-NOLSE-NEXT: .LBB7_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB7_6
+; SOFTFP-NOLSE-NEXT: .LBB7_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB7_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB7_4 Depth 2
; SOFTFP-NOLSE-NEXT: and w0, w19, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
; SOFTFP-NOLSE-NEXT: mov w24, w0
@@ -731,20 +739,18 @@ define <2 x half> @test_atomicrmw_fadd_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: mov w8, w22
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
; SOFTFP-NOLSE-NEXT: bfi w8, w23, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB7_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB7_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x20]
; SOFTFP-NOLSE-NEXT: cmp w22, w8
; SOFTFP-NOLSE-NEXT: b.ne .LBB7_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB7_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB7_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB7_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w0, [x20]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB7_4
+; SOFTFP-NOLSE-NEXT: b .LBB7_2
; SOFTFP-NOLSE-NEXT: .LBB7_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
@@ -817,16 +823,18 @@ define <2 x bfloat> @test_atomicrmw_fadd_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: lsl w21, w8, #16
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB8_2
+; SOFTFP-NOLSE-NEXT: b .LBB8_3
; SOFTFP-NOLSE-NEXT: .LBB8_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB8_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB8_6
-; SOFTFP-NOLSE-NEXT: .LBB8_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB8_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB8_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB8_4 Depth 2
; SOFTFP-NOLSE-NEXT: lsl w23, w1, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w0, w23
@@ -839,20 +847,18 @@ define <2 x bfloat> @test_atomicrmw_fadd_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: bfxil w23, w22, #0, #16
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB8_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB8_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x19]
; SOFTFP-NOLSE-NEXT: cmp w22, w23
; SOFTFP-NOLSE-NEXT: b.ne .LBB8_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w8, w0, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB8_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x19]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB8_2
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB8_4
+; SOFTFP-NOLSE-NEXT: b .LBB8_2
; SOFTFP-NOLSE-NEXT: .LBB8_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
@@ -906,16 +912,18 @@ define <2 x float> @test_atomicrmw_fadd_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB9_2
+; SOFTFP-NOLSE-NEXT: b .LBB9_3
; SOFTFP-NOLSE-NEXT: .LBB9_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB9_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB9_6
-; SOFTFP-NOLSE-NEXT: .LBB9_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB9_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB9_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB9_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w0, w23
; SOFTFP-NOLSE-NEXT: mov w1, w19
; SOFTFP-NOLSE-NEXT: bl __addsf3
@@ -924,24 +932,22 @@ define <2 x float> @test_atomicrmw_fadd_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: mov w1, w21
; SOFTFP-NOLSE-NEXT: bl __addsf3
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: mov w9, w22
+; SOFTFP-NOLSE-NEXT: mov w10, w22
; SOFTFP-NOLSE-NEXT: // kill: def $w23 killed $w23 killed $x23 def $x23
-; SOFTFP-NOLSE-NEXT: orr x8, x8, x24, lsl #32
-; SOFTFP-NOLSE-NEXT: orr x9, x9, x23, lsl #32
-; SOFTFP-NOLSE-NEXT: .LBB9_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB9_2 Depth=1
+; SOFTFP-NOLSE-NEXT: orr x9, x8, x24, lsl #32
+; SOFTFP-NOLSE-NEXT: orr x10, x10, x23, lsl #32
+; SOFTFP-NOLSE-NEXT: .LBB9_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB9_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x22, [x20]
-; SOFTFP-NOLSE-NEXT: cmp x22, x9
+; SOFTFP-NOLSE-NEXT: cmp x22, x10
; SOFTFP-NOLSE-NEXT: b.ne .LBB9_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB9_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB9_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w11, x9, [x20]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB9_2
+; SOFTFP-NOLSE-NEXT: cbnz w11, .LBB9_4
+; SOFTFP-NOLSE-NEXT: b .LBB9_2
; SOFTFP-NOLSE-NEXT: .LBB9_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
index e3e18a1..5ad9991 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fmax.ll
@@ -51,15 +51,17 @@ define half @test_atomicrmw_fmax_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB0_2
+; SOFTFP-NOLSE-NEXT: b .LBB0_3
; SOFTFP-NOLSE-NEXT: .LBB0_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB0_6
-; SOFTFP-NOLSE-NEXT: .LBB0_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB0_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_6
+; SOFTFP-NOLSE-NEXT: .LBB0_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB0_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB0_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -70,19 +72,18 @@ define half @test_atomicrmw_fmax_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl fmaxf
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB0_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB0_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB0_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB0_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB0_4
+; SOFTFP-NOLSE-NEXT: b .LBB0_2
; SOFTFP-NOLSE-NEXT: .LBB0_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -139,15 +140,17 @@ define half @test_atomicrmw_fmax_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB1_2
+; SOFTFP-NOLSE-NEXT: b .LBB1_3
; SOFTFP-NOLSE-NEXT: .LBB1_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB1_6
-; SOFTFP-NOLSE-NEXT: .LBB1_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB1_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_6
+; SOFTFP-NOLSE-NEXT: .LBB1_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB1_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB1_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -158,19 +161,18 @@ define half @test_atomicrmw_fmax_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl fmaxf
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB1_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB1_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB1_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB1_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB1_4
+; SOFTFP-NOLSE-NEXT: b .LBB1_2
; SOFTFP-NOLSE-NEXT: .LBB1_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -238,34 +240,35 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB2_2
+; SOFTFP-NOLSE-NEXT: b .LBB2_3
; SOFTFP-NOLSE-NEXT: .LBB2_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB2_6
-; SOFTFP-NOLSE-NEXT: .LBB2_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB2_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_6
+; SOFTFP-NOLSE-NEXT: .LBB2_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB2_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB2_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl fmaxf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB2_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB2_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB2_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB2_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB2_4
+; SOFTFP-NOLSE-NEXT: b .LBB2_2
; SOFTFP-NOLSE-NEXT: .LBB2_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -332,34 +335,35 @@ define bfloat @test_atomicrmw_fmax_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB3_2
+; SOFTFP-NOLSE-NEXT: b .LBB3_3
; SOFTFP-NOLSE-NEXT: .LBB3_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB3_6
-; SOFTFP-NOLSE-NEXT: .LBB3_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB3_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_6
+; SOFTFP-NOLSE-NEXT: .LBB3_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB3_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB3_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl fmaxf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB3_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB3_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB3_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB3_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB3_4
+; SOFTFP-NOLSE-NEXT: b .LBB3_2
; SOFTFP-NOLSE-NEXT: .LBB3_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -408,32 +412,33 @@ define float @test_atomicrmw_fmax_f32_seq_cst_align4(ptr %ptr, float %value) #0
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
-; SOFTFP-NOLSE-NEXT: b .LBB4_2
+; SOFTFP-NOLSE-NEXT: b .LBB4_3
; SOFTFP-NOLSE-NEXT: .LBB4_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB4_6
-; SOFTFP-NOLSE-NEXT: .LBB4_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB4_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_6
+; SOFTFP-NOLSE-NEXT: .LBB4_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB4_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB4_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: bl fmaxf
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB4_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB4_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21
; SOFTFP-NOLSE-NEXT: b.ne .LBB4_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB4_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB4_4
+; SOFTFP-NOLSE-NEXT: b .LBB4_2
; SOFTFP-NOLSE-NEXT: .LBB4_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -482,32 +487,33 @@ define double @test_atomicrmw_fmax_f32_seq_cst_align8(ptr %ptr, double %value) #
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr x0, [x0]
; SOFTFP-NOLSE-NEXT: mov x20, x1
-; SOFTFP-NOLSE-NEXT: b .LBB5_2
+; SOFTFP-NOLSE-NEXT: b .LBB5_3
; SOFTFP-NOLSE-NEXT: .LBB5_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB5_6
-; SOFTFP-NOLSE-NEXT: .LBB5_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB5_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_6
+; SOFTFP-NOLSE-NEXT: .LBB5_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB5_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB5_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov x1, x20
; SOFTFP-NOLSE-NEXT: mov x21, x0
; SOFTFP-NOLSE-NEXT: bl fmax
; SOFTFP-NOLSE-NEXT: mov x8, x0
-; SOFTFP-NOLSE-NEXT: .LBB5_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB5_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x0, [x19]
; SOFTFP-NOLSE-NEXT: cmp x0, x21
; SOFTFP-NOLSE-NEXT: b.ne .LBB5_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, x8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB5_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB5_4
+; SOFTFP-NOLSE-NEXT: b .LBB5_2
; SOFTFP-NOLSE-NEXT: .LBB5_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
@@ -581,16 +587,18 @@ define <2 x half> @test_atomicrmw_fmax_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB6_2
+; SOFTFP-NOLSE-NEXT: b .LBB6_3
; SOFTFP-NOLSE-NEXT: .LBB6_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB6_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB6_6
-; SOFTFP-NOLSE-NEXT: .LBB6_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB6_6
+; SOFTFP-NOLSE-NEXT: .LBB6_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB6_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB6_4 Depth 2
; SOFTFP-NOLSE-NEXT: and w0, w19, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
; SOFTFP-NOLSE-NEXT: mov w24, w0
@@ -611,20 +619,18 @@ define <2 x half> @test_atomicrmw_fmax_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: mov w8, w22
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
; SOFTFP-NOLSE-NEXT: bfi w8, w23, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB6_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB6_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB6_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB6_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x20]
; SOFTFP-NOLSE-NEXT: cmp w22, w8
; SOFTFP-NOLSE-NEXT: b.ne .LBB6_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB6_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB6_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB6_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w0, [x20]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB6_4
+; SOFTFP-NOLSE-NEXT: b .LBB6_2
; SOFTFP-NOLSE-NEXT: .LBB6_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
@@ -725,16 +731,18 @@ define <2 x bfloat> @test_atomicrmw_fmax_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: lsl w21, w8, #16
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB7_2
+; SOFTFP-NOLSE-NEXT: b .LBB7_3
; SOFTFP-NOLSE-NEXT: .LBB7_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB7_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB7_6
-; SOFTFP-NOLSE-NEXT: .LBB7_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB7_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB7_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB7_4 Depth 2
; SOFTFP-NOLSE-NEXT: lsl w23, w1, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w0, w23
@@ -747,20 +755,18 @@ define <2 x bfloat> @test_atomicrmw_fmax_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: bfxil w23, w22, #0, #16
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB7_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB7_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x19]
; SOFTFP-NOLSE-NEXT: cmp w22, w23
; SOFTFP-NOLSE-NEXT: b.ne .LBB7_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w8, w0, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB7_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x19]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB7_2
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB7_4
+; SOFTFP-NOLSE-NEXT: b .LBB7_2
; SOFTFP-NOLSE-NEXT: .LBB7_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
@@ -814,16 +820,18 @@ define <2 x float> @test_atomicrmw_fmax_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB8_2
+; SOFTFP-NOLSE-NEXT: b .LBB8_3
; SOFTFP-NOLSE-NEXT: .LBB8_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB8_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB8_6
-; SOFTFP-NOLSE-NEXT: .LBB8_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB8_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB8_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB8_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w0, w23
; SOFTFP-NOLSE-NEXT: mov w1, w19
; SOFTFP-NOLSE-NEXT: bl fmaxf
@@ -832,24 +840,22 @@ define <2 x float> @test_atomicrmw_fmax_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: mov w1, w21
; SOFTFP-NOLSE-NEXT: bl fmaxf
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: mov w9, w22
+; SOFTFP-NOLSE-NEXT: mov w10, w22
; SOFTFP-NOLSE-NEXT: // kill: def $w23 killed $w23 killed $x23 def $x23
-; SOFTFP-NOLSE-NEXT: orr x8, x8, x24, lsl #32
-; SOFTFP-NOLSE-NEXT: orr x9, x9, x23, lsl #32
-; SOFTFP-NOLSE-NEXT: .LBB8_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: orr x9, x8, x24, lsl #32
+; SOFTFP-NOLSE-NEXT: orr x10, x10, x23, lsl #32
+; SOFTFP-NOLSE-NEXT: .LBB8_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x22, [x20]
-; SOFTFP-NOLSE-NEXT: cmp x22, x9
+; SOFTFP-NOLSE-NEXT: cmp x22, x10
; SOFTFP-NOLSE-NEXT: b.ne .LBB8_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB8_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w11, x9, [x20]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB8_2
+; SOFTFP-NOLSE-NEXT: cbnz w11, .LBB8_4
+; SOFTFP-NOLSE-NEXT: b .LBB8_2
; SOFTFP-NOLSE-NEXT: .LBB8_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll
index 10de677..d421b4c 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fmin.ll
@@ -51,15 +51,17 @@ define half @test_atomicrmw_fmin_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB0_2
+; SOFTFP-NOLSE-NEXT: b .LBB0_3
; SOFTFP-NOLSE-NEXT: .LBB0_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB0_6
-; SOFTFP-NOLSE-NEXT: .LBB0_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB0_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_6
+; SOFTFP-NOLSE-NEXT: .LBB0_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB0_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB0_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -70,19 +72,18 @@ define half @test_atomicrmw_fmin_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl fminf
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB0_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB0_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB0_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB0_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB0_4
+; SOFTFP-NOLSE-NEXT: b .LBB0_2
; SOFTFP-NOLSE-NEXT: .LBB0_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -139,15 +140,17 @@ define half @test_atomicrmw_fmin_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB1_2
+; SOFTFP-NOLSE-NEXT: b .LBB1_3
; SOFTFP-NOLSE-NEXT: .LBB1_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB1_6
-; SOFTFP-NOLSE-NEXT: .LBB1_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB1_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_6
+; SOFTFP-NOLSE-NEXT: .LBB1_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB1_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB1_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -158,19 +161,18 @@ define half @test_atomicrmw_fmin_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl fminf
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB1_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB1_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB1_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB1_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB1_4
+; SOFTFP-NOLSE-NEXT: b .LBB1_2
; SOFTFP-NOLSE-NEXT: .LBB1_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -238,34 +240,35 @@ define bfloat @test_atomicrmw_fmin_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB2_2
+; SOFTFP-NOLSE-NEXT: b .LBB2_3
; SOFTFP-NOLSE-NEXT: .LBB2_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB2_6
-; SOFTFP-NOLSE-NEXT: .LBB2_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB2_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_6
+; SOFTFP-NOLSE-NEXT: .LBB2_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB2_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB2_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl fminf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB2_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB2_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB2_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB2_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB2_4
+; SOFTFP-NOLSE-NEXT: b .LBB2_2
; SOFTFP-NOLSE-NEXT: .LBB2_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -332,34 +335,35 @@ define bfloat @test_atomicrmw_fmin_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB3_2
+; SOFTFP-NOLSE-NEXT: b .LBB3_3
; SOFTFP-NOLSE-NEXT: .LBB3_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB3_6
-; SOFTFP-NOLSE-NEXT: .LBB3_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB3_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_6
+; SOFTFP-NOLSE-NEXT: .LBB3_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB3_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB3_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl fminf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB3_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB3_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB3_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB3_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB3_4
+; SOFTFP-NOLSE-NEXT: b .LBB3_2
; SOFTFP-NOLSE-NEXT: .LBB3_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -408,32 +412,33 @@ define float @test_atomicrmw_fmin_f32_seq_cst_align4(ptr %ptr, float %value) #0
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
-; SOFTFP-NOLSE-NEXT: b .LBB4_2
+; SOFTFP-NOLSE-NEXT: b .LBB4_3
; SOFTFP-NOLSE-NEXT: .LBB4_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB4_6
-; SOFTFP-NOLSE-NEXT: .LBB4_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB4_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_6
+; SOFTFP-NOLSE-NEXT: .LBB4_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB4_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB4_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: bl fminf
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB4_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB4_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21
; SOFTFP-NOLSE-NEXT: b.ne .LBB4_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB4_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB4_4
+; SOFTFP-NOLSE-NEXT: b .LBB4_2
; SOFTFP-NOLSE-NEXT: .LBB4_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -482,32 +487,33 @@ define double @test_atomicrmw_fmin_f32_seq_cst_align8(ptr %ptr, double %value) #
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr x0, [x0]
; SOFTFP-NOLSE-NEXT: mov x20, x1
-; SOFTFP-NOLSE-NEXT: b .LBB5_2
+; SOFTFP-NOLSE-NEXT: b .LBB5_3
; SOFTFP-NOLSE-NEXT: .LBB5_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB5_6
-; SOFTFP-NOLSE-NEXT: .LBB5_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB5_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_6
+; SOFTFP-NOLSE-NEXT: .LBB5_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB5_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB5_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov x1, x20
; SOFTFP-NOLSE-NEXT: mov x21, x0
; SOFTFP-NOLSE-NEXT: bl fmin
; SOFTFP-NOLSE-NEXT: mov x8, x0
-; SOFTFP-NOLSE-NEXT: .LBB5_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB5_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x0, [x19]
; SOFTFP-NOLSE-NEXT: cmp x0, x21
; SOFTFP-NOLSE-NEXT: b.ne .LBB5_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, x8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB5_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB5_4
+; SOFTFP-NOLSE-NEXT: b .LBB5_2
; SOFTFP-NOLSE-NEXT: .LBB5_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
@@ -581,16 +587,18 @@ define <2 x half> @test_atomicrmw_fmin_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB6_2
+; SOFTFP-NOLSE-NEXT: b .LBB6_3
; SOFTFP-NOLSE-NEXT: .LBB6_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB6_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB6_6
-; SOFTFP-NOLSE-NEXT: .LBB6_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB6_6
+; SOFTFP-NOLSE-NEXT: .LBB6_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB6_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB6_4 Depth 2
; SOFTFP-NOLSE-NEXT: and w0, w19, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
; SOFTFP-NOLSE-NEXT: mov w24, w0
@@ -611,20 +619,18 @@ define <2 x half> @test_atomicrmw_fmin_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: mov w8, w22
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
; SOFTFP-NOLSE-NEXT: bfi w8, w23, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB6_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB6_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB6_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB6_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x20]
; SOFTFP-NOLSE-NEXT: cmp w22, w8
; SOFTFP-NOLSE-NEXT: b.ne .LBB6_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB6_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB6_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB6_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB6_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w0, [x20]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB6_4
+; SOFTFP-NOLSE-NEXT: b .LBB6_2
; SOFTFP-NOLSE-NEXT: .LBB6_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
@@ -725,16 +731,18 @@ define <2 x bfloat> @test_atomicrmw_fmin_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: lsl w21, w8, #16
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB7_2
+; SOFTFP-NOLSE-NEXT: b .LBB7_3
; SOFTFP-NOLSE-NEXT: .LBB7_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB7_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB7_6
-; SOFTFP-NOLSE-NEXT: .LBB7_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB7_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB7_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB7_4 Depth 2
; SOFTFP-NOLSE-NEXT: lsl w23, w1, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w0, w23
@@ -747,20 +755,18 @@ define <2 x bfloat> @test_atomicrmw_fmin_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: bfxil w23, w22, #0, #16
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB7_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB7_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x19]
; SOFTFP-NOLSE-NEXT: cmp w22, w23
; SOFTFP-NOLSE-NEXT: b.ne .LBB7_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w8, w0, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB7_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x19]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB7_2
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB7_4
+; SOFTFP-NOLSE-NEXT: b .LBB7_2
; SOFTFP-NOLSE-NEXT: .LBB7_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
@@ -814,16 +820,18 @@ define <2 x float> @test_atomicrmw_fmin_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB8_2
+; SOFTFP-NOLSE-NEXT: b .LBB8_3
; SOFTFP-NOLSE-NEXT: .LBB8_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB8_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB8_6
-; SOFTFP-NOLSE-NEXT: .LBB8_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB8_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB8_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB8_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w0, w23
; SOFTFP-NOLSE-NEXT: mov w1, w19
; SOFTFP-NOLSE-NEXT: bl fminf
@@ -832,24 +840,22 @@ define <2 x float> @test_atomicrmw_fmin_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: mov w1, w21
; SOFTFP-NOLSE-NEXT: bl fminf
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: mov w9, w22
+; SOFTFP-NOLSE-NEXT: mov w10, w22
; SOFTFP-NOLSE-NEXT: // kill: def $w23 killed $w23 killed $x23 def $x23
-; SOFTFP-NOLSE-NEXT: orr x8, x8, x24, lsl #32
-; SOFTFP-NOLSE-NEXT: orr x9, x9, x23, lsl #32
-; SOFTFP-NOLSE-NEXT: .LBB8_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: orr x9, x8, x24, lsl #32
+; SOFTFP-NOLSE-NEXT: orr x10, x10, x23, lsl #32
+; SOFTFP-NOLSE-NEXT: .LBB8_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x22, [x20]
-; SOFTFP-NOLSE-NEXT: cmp x22, x9
+; SOFTFP-NOLSE-NEXT: cmp x22, x10
; SOFTFP-NOLSE-NEXT: b.ne .LBB8_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB8_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w11, x9, [x20]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB8_2
+; SOFTFP-NOLSE-NEXT: cbnz w11, .LBB8_4
+; SOFTFP-NOLSE-NEXT: b .LBB8_2
; SOFTFP-NOLSE-NEXT: .LBB8_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
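
The fmin hunks above all follow one template: an atomicrmw on a soft-float type is expanded into the outer %atomicrmw.start loop (recompute the result through libcalls such as fminf and __truncsfhf2) wrapped around the inner %cmpxchg.start LL/SC loop (ldaxr/stlxr). A minimal sketch of the kind of IR these CHECK lines are generated from — the function name is illustrative; the real tests use the test_atomicrmw_fmin_* names visible in the hunk headers:

define half @fmin_sketch(ptr %ptr, half %value) {
  ; Without LSE atomics this single instruction is expanded into the
  ; nested loops seen in the SOFTFP-NOLSE output above.
  %res = atomicrmw fmin ptr %ptr, half %value seq_cst, align 2
  ret half %res
}
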
diff --git a/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll b/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll
index 82e0f14..947f67a 100644
--- a/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll
+++ b/llvm/test/CodeGen/AArch64/atomicrmw-fsub.ll
@@ -49,15 +49,17 @@ define half @test_atomicrmw_fsub_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB0_2
+; SOFTFP-NOLSE-NEXT: b .LBB0_3
; SOFTFP-NOLSE-NEXT: .LBB0_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB0_6
-; SOFTFP-NOLSE-NEXT: .LBB0_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB0_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_6
+; SOFTFP-NOLSE-NEXT: .LBB0_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB0_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB0_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -68,19 +70,18 @@ define half @test_atomicrmw_fsub_f16_seq_cst_align2(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl __subsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB0_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB0_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB0_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB0_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB0_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB0_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB0_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB0_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB0_4
+; SOFTFP-NOLSE-NEXT: b .LBB0_2
; SOFTFP-NOLSE-NEXT: .LBB0_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -137,15 +138,17 @@ define half @test_atomicrmw_fsub_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
; SOFTFP-NOLSE-NEXT: stp x22, x21, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB1_2
+; SOFTFP-NOLSE-NEXT: b .LBB1_3
; SOFTFP-NOLSE-NEXT: .LBB1_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB1_6
-; SOFTFP-NOLSE-NEXT: .LBB1_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB1_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_6
+; SOFTFP-NOLSE-NEXT: .LBB1_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB1_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB1_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w22, w0
; SOFTFP-NOLSE-NEXT: and w0, w20, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
@@ -156,19 +159,18 @@ define half @test_atomicrmw_fsub_f16_seq_cst_align4(ptr %ptr, half %value) #0 {
; SOFTFP-NOLSE-NEXT: bl __subsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfhf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB1_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB1_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB1_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w22, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB1_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB1_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB1_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB1_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB1_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB1_4
+; SOFTFP-NOLSE-NEXT: b .LBB1_2
; SOFTFP-NOLSE-NEXT: .LBB1_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #32] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -236,34 +238,35 @@ define bfloat @test_atomicrmw_fsub_bf16_seq_cst_align2(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB2_2
+; SOFTFP-NOLSE-NEXT: b .LBB2_3
; SOFTFP-NOLSE-NEXT: .LBB2_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB2_6
-; SOFTFP-NOLSE-NEXT: .LBB2_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB2_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_6
+; SOFTFP-NOLSE-NEXT: .LBB2_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB2_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB2_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl __subsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB2_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB2_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB2_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB2_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB2_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB2_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB2_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB2_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB2_4
+; SOFTFP-NOLSE-NEXT: b .LBB2_2
; SOFTFP-NOLSE-NEXT: .LBB2_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -330,34 +333,35 @@ define bfloat @test_atomicrmw_fsub_bf16_seq_cst_align4(ptr %ptr, bfloat %value)
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldrh w0, [x0]
; SOFTFP-NOLSE-NEXT: lsl w20, w1, #16
-; SOFTFP-NOLSE-NEXT: b .LBB3_2
+; SOFTFP-NOLSE-NEXT: b .LBB3_3
; SOFTFP-NOLSE-NEXT: .LBB3_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB3_6
-; SOFTFP-NOLSE-NEXT: .LBB3_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB3_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_6
+; SOFTFP-NOLSE-NEXT: .LBB3_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB3_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB3_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: lsl w0, w0, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: bl __subsf3
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB3_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB3_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB3_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxrh w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21, uxth
; SOFTFP-NOLSE-NEXT: b.ne .LBB3_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxrh w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB3_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB3_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB3_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB3_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxrh w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB3_4
+; SOFTFP-NOLSE-NEXT: b .LBB3_2
; SOFTFP-NOLSE-NEXT: .LBB3_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -406,32 +410,33 @@ define float @test_atomicrmw_fsub_f32_seq_cst_align4(ptr %ptr, float %value) #0
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr w0, [x0]
; SOFTFP-NOLSE-NEXT: mov w20, w1
-; SOFTFP-NOLSE-NEXT: b .LBB4_2
+; SOFTFP-NOLSE-NEXT: b .LBB4_3
; SOFTFP-NOLSE-NEXT: .LBB4_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB4_6
-; SOFTFP-NOLSE-NEXT: .LBB4_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB4_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_6
+; SOFTFP-NOLSE-NEXT: .LBB4_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB4_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB4_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w21, w0
; SOFTFP-NOLSE-NEXT: bl __subsf3
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: .LBB4_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB4_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB4_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w0, [x19]
; SOFTFP-NOLSE-NEXT: cmp w0, w21
; SOFTFP-NOLSE-NEXT: b.ne .LBB4_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB4_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB4_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB4_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB4_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB4_4
+; SOFTFP-NOLSE-NEXT: b .LBB4_2
; SOFTFP-NOLSE-NEXT: .LBB4_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: // kill: def $w0 killed $w0 killed $x0
@@ -480,32 +485,33 @@ define double @test_atomicrmw_fsub_f32_seq_cst_align8(ptr %ptr, double %value) #
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: ldr x0, [x0]
; SOFTFP-NOLSE-NEXT: mov x20, x1
-; SOFTFP-NOLSE-NEXT: b .LBB5_2
+; SOFTFP-NOLSE-NEXT: b .LBB5_3
; SOFTFP-NOLSE-NEXT: .LBB5_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB5_6
-; SOFTFP-NOLSE-NEXT: .LBB5_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB5_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=1
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_6
+; SOFTFP-NOLSE-NEXT: .LBB5_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB5_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB5_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov x1, x20
; SOFTFP-NOLSE-NEXT: mov x21, x0
; SOFTFP-NOLSE-NEXT: bl __subdf3
; SOFTFP-NOLSE-NEXT: mov x8, x0
-; SOFTFP-NOLSE-NEXT: .LBB5_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB5_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB5_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x0, [x19]
; SOFTFP-NOLSE-NEXT: cmp x0, x21
; SOFTFP-NOLSE-NEXT: b.ne .LBB5_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, x8, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB5_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB5_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB5_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB5_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x19]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB5_4
+; SOFTFP-NOLSE-NEXT: b .LBB5_2
; SOFTFP-NOLSE-NEXT: .LBB5_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #16] // 16-byte Folded Reload
; SOFTFP-NOLSE-NEXT: ldp x30, x21, [sp], #32 // 16-byte Folded Reload
@@ -701,16 +707,18 @@ define <2 x half> @test_atomicrmw_fsub_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB7_2
+; SOFTFP-NOLSE-NEXT: b .LBB7_3
; SOFTFP-NOLSE-NEXT: .LBB7_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, wzr
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
+; SOFTFP-NOLSE-NEXT: mov w9, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB7_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB7_6
-; SOFTFP-NOLSE-NEXT: .LBB7_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB7_6
+; SOFTFP-NOLSE-NEXT: .LBB7_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB7_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB7_4 Depth 2
; SOFTFP-NOLSE-NEXT: and w0, w19, #0xffff
; SOFTFP-NOLSE-NEXT: bl __extendhfsf2
; SOFTFP-NOLSE-NEXT: mov w24, w0
@@ -731,20 +739,18 @@ define <2 x half> @test_atomicrmw_fsub_v2f16_seq_cst_align4(ptr %ptr, <2 x half>
; SOFTFP-NOLSE-NEXT: mov w8, w22
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
; SOFTFP-NOLSE-NEXT: bfi w8, w23, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB7_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB7_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB7_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x20]
; SOFTFP-NOLSE-NEXT: cmp w22, w8
; SOFTFP-NOLSE-NEXT: b.ne .LBB7_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB7_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB7_2 Depth=1
-; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w23, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB7_2
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB7_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w10, w0, [x20]
+; SOFTFP-NOLSE-NEXT: mov w9, #1 // =0x1
+; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB7_4
+; SOFTFP-NOLSE-NEXT: b .LBB7_2
; SOFTFP-NOLSE-NEXT: .LBB7_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
@@ -817,16 +823,18 @@ define <2 x bfloat> @test_atomicrmw_fsub_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: lsl w21, w8, #16
; SOFTFP-NOLSE-NEXT: mov x19, x0
; SOFTFP-NOLSE-NEXT: stp x24, x23, [sp, #16] // 16-byte Folded Spill
-; SOFTFP-NOLSE-NEXT: b .LBB8_2
+; SOFTFP-NOLSE-NEXT: b .LBB8_3
; SOFTFP-NOLSE-NEXT: .LBB8_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB8_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB8_6
-; SOFTFP-NOLSE-NEXT: .LBB8_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB8_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB8_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB8_4 Depth 2
; SOFTFP-NOLSE-NEXT: lsl w23, w1, #16
; SOFTFP-NOLSE-NEXT: mov w1, w20
; SOFTFP-NOLSE-NEXT: mov w0, w23
@@ -839,20 +847,18 @@ define <2 x bfloat> @test_atomicrmw_fsub_v2bf16_seq_cst_align4(ptr %ptr, <2 x bf
; SOFTFP-NOLSE-NEXT: bl __truncsfbf2
; SOFTFP-NOLSE-NEXT: bfxil w23, w22, #0, #16
; SOFTFP-NOLSE-NEXT: bfi w0, w24, #16, #16
-; SOFTFP-NOLSE-NEXT: .LBB8_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: .LBB8_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB8_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr w22, [x19]
; SOFTFP-NOLSE-NEXT: cmp w22, w23
; SOFTFP-NOLSE-NEXT: b.ne .LBB8_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w8, w0, [x19]
-; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB8_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB8_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB8_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w9, w0, [x19]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr w1, w22, #16
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB8_2
+; SOFTFP-NOLSE-NEXT: cbnz w9, .LBB8_4
+; SOFTFP-NOLSE-NEXT: b .LBB8_2
; SOFTFP-NOLSE-NEXT: .LBB8_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: ldp x20, x19, [sp, #48] // 16-byte Folded Reload
@@ -906,16 +912,18 @@ define <2 x float> @test_atomicrmw_fsub_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: stp x20, x19, [sp, #48] // 16-byte Folded Spill
; SOFTFP-NOLSE-NEXT: mov w19, w2
; SOFTFP-NOLSE-NEXT: mov x20, x0
-; SOFTFP-NOLSE-NEXT: b .LBB9_2
+; SOFTFP-NOLSE-NEXT: b .LBB9_3
; SOFTFP-NOLSE-NEXT: .LBB9_1: // %cmpxchg.nostore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_3 Depth=1
; SOFTFP-NOLSE-NEXT: mov w8, wzr
; SOFTFP-NOLSE-NEXT: clrex
+; SOFTFP-NOLSE-NEXT: .LBB9_2: // %cmpxchg.end
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_3 Depth=1
; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
; SOFTFP-NOLSE-NEXT: cbnz w8, .LBB9_6
-; SOFTFP-NOLSE-NEXT: .LBB9_2: // %atomicrmw.start
+; SOFTFP-NOLSE-NEXT: .LBB9_3: // %atomicrmw.start
; SOFTFP-NOLSE-NEXT: // =>This Loop Header: Depth=1
-; SOFTFP-NOLSE-NEXT: // Child Loop BB9_3 Depth 2
+; SOFTFP-NOLSE-NEXT: // Child Loop BB9_4 Depth 2
; SOFTFP-NOLSE-NEXT: mov w0, w23
; SOFTFP-NOLSE-NEXT: mov w1, w19
; SOFTFP-NOLSE-NEXT: bl __subsf3
@@ -924,24 +932,22 @@ define <2 x float> @test_atomicrmw_fsub_v2f32_seq_cst_align8(ptr %ptr, <2 x floa
; SOFTFP-NOLSE-NEXT: mov w1, w21
; SOFTFP-NOLSE-NEXT: bl __subsf3
; SOFTFP-NOLSE-NEXT: mov w8, w0
-; SOFTFP-NOLSE-NEXT: mov w9, w22
+; SOFTFP-NOLSE-NEXT: mov w10, w22
; SOFTFP-NOLSE-NEXT: // kill: def $w23 killed $w23 killed $x23 def $x23
-; SOFTFP-NOLSE-NEXT: orr x8, x8, x24, lsl #32
-; SOFTFP-NOLSE-NEXT: orr x9, x9, x23, lsl #32
-; SOFTFP-NOLSE-NEXT: .LBB9_3: // %cmpxchg.start
-; SOFTFP-NOLSE-NEXT: // Parent Loop BB9_2 Depth=1
+; SOFTFP-NOLSE-NEXT: orr x9, x8, x24, lsl #32
+; SOFTFP-NOLSE-NEXT: orr x10, x10, x23, lsl #32
+; SOFTFP-NOLSE-NEXT: .LBB9_4: // %cmpxchg.start
+; SOFTFP-NOLSE-NEXT: // Parent Loop BB9_3 Depth=1
; SOFTFP-NOLSE-NEXT: // => This Inner Loop Header: Depth=2
; SOFTFP-NOLSE-NEXT: ldaxr x22, [x20]
-; SOFTFP-NOLSE-NEXT: cmp x22, x9
+; SOFTFP-NOLSE-NEXT: cmp x22, x10
; SOFTFP-NOLSE-NEXT: b.ne .LBB9_1
-; SOFTFP-NOLSE-NEXT: // %bb.4: // %cmpxchg.trystore
-; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_3 Depth=2
-; SOFTFP-NOLSE-NEXT: stlxr w10, x8, [x20]
-; SOFTFP-NOLSE-NEXT: cbnz w10, .LBB9_3
-; SOFTFP-NOLSE-NEXT: // %bb.5: // in Loop: Header=BB9_2 Depth=1
+; SOFTFP-NOLSE-NEXT: // %bb.5: // %cmpxchg.trystore
+; SOFTFP-NOLSE-NEXT: // in Loop: Header=BB9_4 Depth=2
+; SOFTFP-NOLSE-NEXT: stlxr w11, x9, [x20]
; SOFTFP-NOLSE-NEXT: mov w8, #1 // =0x1
-; SOFTFP-NOLSE-NEXT: lsr x23, x22, #32
-; SOFTFP-NOLSE-NEXT: cbz w8, .LBB9_2
+; SOFTFP-NOLSE-NEXT: cbnz w11, .LBB9_4
+; SOFTFP-NOLSE-NEXT: b .LBB9_2
; SOFTFP-NOLSE-NEXT: .LBB9_6: // %atomicrmw.end
; SOFTFP-NOLSE-NEXT: mov w0, w22
; SOFTFP-NOLSE-NEXT: mov w1, w23
diff --git a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
index b7817eb..d8407e9 100644
--- a/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
+++ b/llvm/test/CodeGen/AArch64/cmpxchg-idioms.ll
@@ -181,36 +181,35 @@ define i1 @test_conditional2(i32 %a, i32 %b, ptr %c) {
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldaxr w8, [x19]
; CHECK-NEXT: cmp w8, w21
-; CHECK-NEXT: b.ne LBB3_4
+; CHECK-NEXT: b.ne LBB3_3
; CHECK-NEXT: ; %bb.2: ; %cmpxchg.trystore
; CHECK-NEXT: ; in Loop: Header=BB3_1 Depth=1
-; CHECK-NEXT: stlxr w8, w20, [x19]
-; CHECK-NEXT: cbnz w8, LBB3_1
-; CHECK-NEXT: ; %bb.3:
+; CHECK-NEXT: stlxr w9, w20, [x19]
; CHECK-NEXT: mov w8, #1 ; =0x1
-; CHECK-NEXT: b LBB3_5
-; CHECK-NEXT: LBB3_4: ; %cmpxchg.nostore
+; CHECK-NEXT: cbnz w9, LBB3_1
+; CHECK-NEXT: b LBB3_4
+; CHECK-NEXT: LBB3_3: ; %cmpxchg.nostore
; CHECK-NEXT: mov w8, wzr
; CHECK-NEXT: clrex
-; CHECK-NEXT: LBB3_5: ; %for.cond.preheader
+; CHECK-NEXT: LBB3_4: ; %for.cond.preheader
; CHECK-NEXT: mov w22, #2 ; =0x2
-; CHECK-NEXT: LBB3_6: ; %for.cond
+; CHECK-NEXT: LBB3_5: ; %for.cond
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
-; CHECK-NEXT: cbz w22, LBB3_9
-; CHECK-NEXT: ; %bb.7: ; %for.body
-; CHECK-NEXT: ; in Loop: Header=BB3_6 Depth=1
+; CHECK-NEXT: cbz w22, LBB3_8
+; CHECK-NEXT: ; %bb.6: ; %for.body
+; CHECK-NEXT: ; in Loop: Header=BB3_5 Depth=1
; CHECK-NEXT: sub w22, w22, #1
; CHECK-NEXT: orr w9, w21, w20
; CHECK-NEXT: ldr w10, [x19, w22, sxtw #2]
; CHECK-NEXT: cmp w9, w10
-; CHECK-NEXT: b.eq LBB3_6
-; CHECK-NEXT: ; %bb.8: ; %if.then
-; CHECK-NEXT: ; in Loop: Header=BB3_6 Depth=1
+; CHECK-NEXT: b.eq LBB3_5
+; CHECK-NEXT: ; %bb.7: ; %if.then
+; CHECK-NEXT: ; in Loop: Header=BB3_5 Depth=1
; CHECK-NEXT: str w9, [x19, w22, sxtw #2]
; CHECK-NEXT: bl _foo
; CHECK-NEXT: mov w8, wzr
-; CHECK-NEXT: b LBB3_6
-; CHECK-NEXT: LBB3_9: ; %for.cond.cleanup
+; CHECK-NEXT: b LBB3_5
+; CHECK-NEXT: LBB3_8: ; %for.cond.cleanup
; CHECK-NEXT: ldp x29, x30, [sp, #32] ; 16-byte Folded Reload
; CHECK-NEXT: and w0, w8, #0x1
; CHECK-NEXT: ldp x20, x19, [sp, #16] ; 16-byte Folded Reload
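
The renumbered blocks in test_conditional2 come from the same underlying idiom: a cmpxchg whose success bit feeds a branch. A rough sketch of that idiom (hypothetical function name; the seq_cst orderings are assumed from the ldaxr/stlxr sequence in the output):

define i1 @cmpxchg_success_sketch(ptr %p, i32 %expected, i32 %new) {
  ; Expands into the cmpxchg.start / cmpxchg.trystore / cmpxchg.nostore
  ; blocks named in the block comments above; the extracted i1 is the
  ; value the w8 flag register carries through the loop.
  %pair = cmpxchg ptr %p, i32 %expected, i32 %new seq_cst seq_cst
  %ok = extractvalue { i32, i1 } %pair, 1
  ret i1 %ok
}
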
diff --git a/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll b/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll
index 4c8e589..ba29fa7 100644
--- a/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll
+++ b/llvm/test/CodeGen/AArch64/machine-combiner-copy.ll
@@ -8,16 +8,16 @@ define void @fma_dup_f16(ptr noalias nocapture noundef readonly %A, half noundef
; CHECK-NEXT: cbz w2, .LBB0_8
; CHECK-NEXT: // %bb.1: // %for.body.preheader
; CHECK-NEXT: cmp w2, #15
-; CHECK-NEXT: mov w8, w2
+; CHECK-NEXT: mov w9, w2
; CHECK-NEXT: b.hi .LBB0_3
; CHECK-NEXT: // %bb.2:
-; CHECK-NEXT: mov x9, xzr
+; CHECK-NEXT: mov x8, xzr
; CHECK-NEXT: b .LBB0_6
; CHECK-NEXT: .LBB0_3: // %vector.ph
-; CHECK-NEXT: and x9, x8, #0xfffffff0
+; CHECK-NEXT: and x8, x9, #0xfffffff0
; CHECK-NEXT: add x10, x1, #16
; CHECK-NEXT: add x11, x0, #16
-; CHECK-NEXT: mov x12, x9
+; CHECK-NEXT: mov x12, x8
; CHECK-NEXT: .LBB0_4: // %vector.body
; CHECK-NEXT: // =>This Inner Loop Header: Depth=1
; CHECK-NEXT: ldp q1, q4, [x10, #-16]
@@ -30,11 +30,11 @@ define void @fma_dup_f16(ptr noalias nocapture noundef readonly %A, half noundef
; CHECK-NEXT: add x10, x10, #32
; CHECK-NEXT: b.ne .LBB0_4
; CHECK-NEXT: // %bb.5: // %middle.block
-; CHECK-NEXT: cmp x9, x8
+; CHECK-NEXT: cmp x8, x9
; CHECK-NEXT: b.eq .LBB0_8
; CHECK-NEXT: .LBB0_6: // %for.body.preheader1
-; CHECK-NEXT: lsl x10, x9, #1
-; CHECK-NEXT: sub x8, x8, x9
+; CHECK-NEXT: lsl x10, x8, #1
+; CHECK-NEXT: sub x8, x9, x8
; CHECK-NEXT: add x9, x1, x10
; CHECK-NEXT: add x10, x0, x10
; CHECK-NEXT: .LBB0_7: // %for.body
diff --git a/llvm/test/CodeGen/AArch64/typepromotion-cost.ll b/llvm/test/CodeGen/AArch64/typepromotion-cost.ll
index 3aed4cb..c3ec614 100644
--- a/llvm/test/CodeGen/AArch64/typepromotion-cost.ll
+++ b/llvm/test/CodeGen/AArch64/typepromotion-cost.ll
@@ -7,39 +7,32 @@ define i32 @needless_promotion(ptr nocapture noundef readonly %S, i64 noundef %r
; CHECK-O2-LABEL: needless_promotion:
; CHECK-O2: // %bb.0: // %entry
; CHECK-O2-NEXT: ldrsh w8, [x0, #4]
-; CHECK-O2-NEXT: tbnz w8, #31, .LBB0_3
+; CHECK-O2-NEXT: mov w9, #1 // =0x1
+; CHECK-O2-NEXT: tbnz w8, #31, .LBB0_2
; CHECK-O2-NEXT: // %bb.1: // %lor.rhs
-; CHECK-O2-NEXT: cbz x1, .LBB0_5
-; CHECK-O2-NEXT: // %bb.2:
-; CHECK-O2-NEXT: mov w9, #2
-; CHECK-O2-NEXT: b .LBB0_4
-; CHECK-O2-NEXT: .LBB0_3:
-; CHECK-O2-NEXT: mov w9, #1
-; CHECK-O2-NEXT: .LBB0_4: // %lor.end.sink.split
+; CHECK-O2-NEXT: mov w9, #2 // =0x2
+; CHECK-O2-NEXT: cbz x1, .LBB0_3
+; CHECK-O2-NEXT: .LBB0_2: // %lor.end.sink.split
; CHECK-O2-NEXT: cmp w8, w9
; CHECK-O2-NEXT: cset w0, eq
; CHECK-O2-NEXT: ret
-; CHECK-O2-NEXT: .LBB0_5:
+; CHECK-O2-NEXT: .LBB0_3:
; CHECK-O2-NEXT: mov w0, wzr
; CHECK-O2-NEXT: ret
;
; CHECK-O3-LABEL: needless_promotion:
; CHECK-O3: // %bb.0: // %entry
; CHECK-O3-NEXT: ldrsh w8, [x0, #4]
-; CHECK-O3-NEXT: tbnz w8, #31, .LBB0_3
+; CHECK-O3-NEXT: mov w9, #1 // =0x1
+; CHECK-O3-NEXT: tbnz w8, #31, .LBB0_2
; CHECK-O3-NEXT: // %bb.1: // %lor.rhs
-; CHECK-O3-NEXT: cbz x1, .LBB0_4
-; CHECK-O3-NEXT: // %bb.2:
-; CHECK-O3-NEXT: mov w9, #2
+; CHECK-O3-NEXT: mov w9, #2 // =0x2
+; CHECK-O3-NEXT: cbz x1, .LBB0_3
+; CHECK-O3-NEXT: .LBB0_2: // %lor.end.sink.split
; CHECK-O3-NEXT: cmp w8, w9
; CHECK-O3-NEXT: cset w0, eq
; CHECK-O3-NEXT: ret
; CHECK-O3-NEXT: .LBB0_3:
-; CHECK-O3-NEXT: mov w9, #1
-; CHECK-O3-NEXT: cmp w8, w9
-; CHECK-O3-NEXT: cset w0, eq
-; CHECK-O3-NEXT: ret
-; CHECK-O3-NEXT: .LBB0_4:
; CHECK-O3-NEXT: mov w0, wzr
; CHECK-O3-NEXT: ret
entry:
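
In needless_promotion the selector constant (#1 or #2) is now materialized on each incoming path, so both paths fall into the shared lor.end.sink.split compare and the duplicated cmp/cset tail disappears at -O3. A hedged reconstruction of the CFG the block names imply — the function name is hypothetical, and the field offset is read off the ldrsh w8, [x0, #4]:

define i32 @needless_promotion_sketch(ptr %S, i64 %r) {
entry:
  %gep = getelementptr inbounds i8, ptr %S, i64 4
  %val = load i16, ptr %gep, align 2
  %ext = sext i16 %val to i32
  %isneg = icmp slt i32 %ext, 0          ; tbnz w8, #31
  br i1 %isneg, label %sink, label %lor.rhs

lor.rhs:
  %rzero = icmp eq i64 %r, 0             ; cbz x1
  br i1 %rzero, label %zero, label %sink

sink:                                    ; %lor.end.sink.split
  %sel = phi i32 [ 1, %entry ], [ 2, %lor.rhs ]
  %eq = icmp eq i32 %ext, %sel           ; cmp w8, w9 / cset w0, eq
  %ret = zext i1 %eq to i32
  ret i32 %ret

zero:
  ret i32 0
}
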
diff --git a/llvm/test/CodeGen/AMDGPU/add.ll b/llvm/test/CodeGen/AMDGPU/add.ll
index b8814b6..8bf9bf9 100644
--- a/llvm/test/CodeGen/AMDGPU/add.ll
+++ b/llvm/test/CodeGen/AMDGPU/add.ll
@@ -1153,16 +1153,15 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX6-LABEL: add64_in_branch:
; GFX6: ; %bb.0: ; %entry
; GFX6-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
-; GFX6-NEXT: s_mov_b64 s[8:9], 0
; GFX6-NEXT: s_waitcnt lgkmcnt(0)
-; GFX6-NEXT: v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
-; GFX6-NEXT: s_and_b64 vcc, exec, s[10:11]
+; GFX6-NEXT: v_cmp_ne_u64_e64 s[8:9], s[4:5], 0
+; GFX6-NEXT: s_and_b64 vcc, exec, s[8:9]
+; GFX6-NEXT: s_mov_b64 s[8:9], -1
; GFX6-NEXT: s_cbranch_vccz .LBB9_4
; GFX6-NEXT: ; %bb.1: ; %else
; GFX6-NEXT: s_add_u32 s4, s4, s6
; GFX6-NEXT: s_addc_u32 s5, s5, s7
-; GFX6-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; GFX6-NEXT: s_cbranch_vccnz .LBB9_3
+; GFX6-NEXT: s_cbranch_execnz .LBB9_3
; GFX6-NEXT: .LBB9_2: ; %if
; GFX6-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
; GFX6-NEXT: .LBB9_3: ; %endif
@@ -1175,20 +1174,21 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX6-NEXT: s_endpgm
; GFX6-NEXT: .LBB9_4:
; GFX6-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX6-NEXT: s_branch .LBB9_2
+; GFX6-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX6-NEXT: s_cbranch_vccz .LBB9_2
+; GFX6-NEXT: s_branch .LBB9_3
;
; GFX8-LABEL: add64_in_branch:
; GFX8: ; %bb.0: ; %entry
; GFX8-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
-; GFX8-NEXT: s_mov_b64 s[8:9], 0
+; GFX8-NEXT: s_mov_b64 s[8:9], -1
; GFX8-NEXT: s_waitcnt lgkmcnt(0)
; GFX8-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX8-NEXT: s_cbranch_scc0 .LBB9_4
; GFX8-NEXT: ; %bb.1: ; %else
; GFX8-NEXT: s_add_u32 s4, s4, s6
; GFX8-NEXT: s_addc_u32 s5, s5, s7
-; GFX8-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; GFX8-NEXT: s_cbranch_vccnz .LBB9_3
+; GFX8-NEXT: s_cbranch_execnz .LBB9_3
; GFX8-NEXT: .LBB9_2: ; %if
; GFX8-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x0
; GFX8-NEXT: .LBB9_3: ; %endif
@@ -1201,20 +1201,21 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX8-NEXT: s_endpgm
; GFX8-NEXT: .LBB9_4:
; GFX8-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX8-NEXT: s_branch .LBB9_2
+; GFX8-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX8-NEXT: s_cbranch_vccz .LBB9_2
+; GFX8-NEXT: s_branch .LBB9_3
;
; GFX9-LABEL: add64_in_branch:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b64 s[2:3], 0
+; GFX9-NEXT: s_mov_b64 s[2:3], -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
; GFX9-NEXT: ; %bb.1: ; %else
; GFX9-NEXT: s_add_u32 s0, s12, s14
; GFX9-NEXT: s_addc_u32 s1, s13, s15
-; GFX9-NEXT: s_andn2_b64 vcc, exec, s[2:3]
-; GFX9-NEXT: s_cbranch_vccnz .LBB9_3
+; GFX9-NEXT: s_cbranch_execnz .LBB9_3
; GFX9-NEXT: .LBB9_2: ; %if
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[10:11], 0x0
; GFX9-NEXT: .LBB9_3: ; %endif
@@ -1226,11 +1227,14 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_endpgm
; GFX9-NEXT: .LBB9_4:
; GFX9-NEXT: ; implicit-def: $sgpr0_sgpr1
-; GFX9-NEXT: s_branch .LBB9_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_vccz .LBB9_2
+; GFX9-NEXT: s_branch .LBB9_3
;
; GFX10-LABEL: add64_in_branch:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; GFX10-NEXT: s_mov_b32 s2, -1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_cmp_lg_u64 s[12:13], 0
; GFX10-NEXT: s_cbranch_scc0 .LBB9_4
@@ -1249,11 +1253,14 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_endpgm
; GFX10-NEXT: .LBB9_4:
; GFX10-NEXT: ; implicit-def: $sgpr0_sgpr1
-; GFX10-NEXT: s_branch .LBB9_2
+; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s2
+; GFX10-NEXT: s_cbranch_vccz .LBB9_2
+; GFX10-NEXT: s_branch .LBB9_3
;
; GFX11-LABEL: add64_in_branch:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
@@ -1271,11 +1278,14 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_endpgm
; GFX11-NEXT: .LBB9_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB9_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB9_2
+; GFX11-NEXT: s_branch .LBB9_3
;
; GFX12-LABEL: add64_in_branch:
; GFX12: ; %bb.0: ; %entry
; GFX12-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX12-NEXT: s_mov_b32 s8, -1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX12-NEXT: s_cbranch_scc0 .LBB9_4
@@ -1292,7 +1302,9 @@ define amdgpu_kernel void @add64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX12-NEXT: s_endpgm
; GFX12-NEXT: .LBB9_4:
; GFX12-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX12-NEXT: s_branch .LBB9_2
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX12-NEXT: s_cbranch_vccz .LBB9_2
+; GFX12-NEXT: s_branch .LBB9_3
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else
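
Each add64_in_branch hunk makes the same change: the flag that used to be zero-initialized and tested on the hot %else path is now set to -1 up front, and the vcc test is sunk into the cold .LBB9_4 block, so the fall-through path no longer executes a dead mov-plus-test. The IR context is truncated above; a reconstruction of the remaining blocks, inferred from the generated code (block and value names are assumptions):

define amdgpu_kernel void @add64_in_branch_sketch(ptr addrspace(1) %out, ptr addrspace(1) %in, i64 %a, i64 %b) {
entry:
  %cmp = icmp eq i64 %a, 0
  br i1 %cmp, label %if, label %else

if:                                      ; s_load_dwordx2 from %in
  %load = load i64, ptr addrspace(1) %in
  br label %endif

else:                                    ; s_add_u32 / s_addc_u32
  %add = add i64 %a, %b
  br label %endif

endif:
  %val = phi i64 [ %load, %if ], [ %add, %else ]
  store i64 %val, ptr addrspace(1) %out
  ret void
}
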
diff --git a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
index 3160e38..b4c85d8 100644
--- a/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
+++ b/llvm/test/CodeGen/AMDGPU/agpr-copy-no-free-registers.ll
@@ -562,13 +562,13 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-NEXT: .LBB3_1: ; %Flow20
; GFX908-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: s_andn2_b64 vcc, exec, s[2:3]
-; GFX908-NEXT: s_cbranch_vccz .LBB3_12
+; GFX908-NEXT: s_cbranch_vccz .LBB3_13
; GFX908-NEXT: .LBB3_2: ; %bb9
; GFX908-NEXT: ; =>This Loop Header: Depth=1
; GFX908-NEXT: ; Child Loop BB3_5 Depth 2
; GFX908-NEXT: s_mov_b64 s[18:19], -1
; GFX908-NEXT: s_mov_b64 vcc, s[0:1]
-; GFX908-NEXT: s_cbranch_vccz .LBB3_10
+; GFX908-NEXT: s_cbranch_vccz .LBB3_11
; GFX908-NEXT: ; %bb.3: ; %bb14
; GFX908-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: global_load_dwordx2 v[2:3], v[0:1], off
@@ -607,7 +607,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-NEXT: s_addc_u32 s21, s21, s5
; GFX908-NEXT: s_mov_b64 s[22:23], 0
; GFX908-NEXT: s_andn2_b64 vcc, exec, s[24:25]
-; GFX908-NEXT: s_cbranch_vccz .LBB3_9
+; GFX908-NEXT: s_cbranch_vccz .LBB3_10
; GFX908-NEXT: .LBB3_5: ; %bb16
; GFX908-NEXT: ; Parent Loop BB3_2 Depth=1
; GFX908-NEXT: ; => This Inner Loop Header: Depth=2
@@ -648,24 +648,27 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-NEXT: v_add_f32_e32 v9, v9, v15
; GFX908-NEXT: v_add_f32_e32 v10, v10, v12
; GFX908-NEXT: v_add_f32_e32 v11, v11, v13
-; GFX908-NEXT: s_branch .LBB3_4
+; GFX908-NEXT: s_mov_b64 s[22:23], -1
+; GFX908-NEXT: s_branch .LBB3_8
; GFX908-NEXT: .LBB3_7: ; in Loop: Header=BB3_5 Depth=2
; GFX908-NEXT: s_mov_b64 s[22:23], s[18:19]
+; GFX908-NEXT: .LBB3_8: ; %Flow
+; GFX908-NEXT: ; in Loop: Header=BB3_5 Depth=2
; GFX908-NEXT: s_andn2_b64 vcc, exec, s[22:23]
-; GFX908-NEXT: s_cbranch_vccz .LBB3_4
-; GFX908-NEXT: ; %bb.8: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: s_mov_b64 s[22:23], -1
+; GFX908-NEXT: s_cbranch_vccz .LBB3_4
+; GFX908-NEXT: ; %bb.9: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: ; implicit-def: $vgpr2_vgpr3
; GFX908-NEXT: ; implicit-def: $sgpr20_sgpr21
-; GFX908-NEXT: .LBB3_9: ; %loop.exit.guard
+; GFX908-NEXT: .LBB3_10: ; %loop.exit.guard
; GFX908-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: s_xor_b64 s[18:19], s[22:23], -1
-; GFX908-NEXT: .LBB3_10: ; %Flow19
+; GFX908-NEXT: .LBB3_11: ; %Flow19
; GFX908-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: s_mov_b64 s[2:3], -1
; GFX908-NEXT: s_and_b64 vcc, exec, s[18:19]
; GFX908-NEXT: s_cbranch_vccz .LBB3_1
-; GFX908-NEXT: ; %bb.11: ; %bb12
+; GFX908-NEXT: ; %bb.12: ; %bb12
; GFX908-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX908-NEXT: s_add_u32 s10, s10, s8
; GFX908-NEXT: s_addc_u32 s11, s11, 0
@@ -673,7 +676,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX908-NEXT: s_addc_u32 s15, s15, s17
; GFX908-NEXT: s_mov_b64 s[2:3], 0
; GFX908-NEXT: s_branch .LBB3_1
-; GFX908-NEXT: .LBB3_12: ; %DummyReturnBlock
+; GFX908-NEXT: .LBB3_13: ; %DummyReturnBlock
; GFX908-NEXT: s_endpgm
;
; GFX90A-LABEL: introduced_copy_to_sgpr:
@@ -727,13 +730,13 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-NEXT: .LBB3_1: ; %Flow20
; GFX90A-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[2:3]
-; GFX90A-NEXT: s_cbranch_vccz .LBB3_12
+; GFX90A-NEXT: s_cbranch_vccz .LBB3_13
; GFX90A-NEXT: .LBB3_2: ; %bb9
; GFX90A-NEXT: ; =>This Loop Header: Depth=1
; GFX90A-NEXT: ; Child Loop BB3_5 Depth 2
; GFX90A-NEXT: s_mov_b64 s[18:19], -1
; GFX90A-NEXT: s_mov_b64 vcc, s[0:1]
-; GFX90A-NEXT: s_cbranch_vccz .LBB3_10
+; GFX90A-NEXT: s_cbranch_vccz .LBB3_11
; GFX90A-NEXT: ; %bb.3: ; %bb14
; GFX90A-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: global_load_dwordx2 v[4:5], v[0:1], off
@@ -768,7 +771,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-NEXT: v_cmp_lt_i64_e64 s[24:25], -1, v[4:5]
; GFX90A-NEXT: s_mov_b64 s[22:23], 0
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[24:25]
-; GFX90A-NEXT: s_cbranch_vccz .LBB3_9
+; GFX90A-NEXT: s_cbranch_vccz .LBB3_10
; GFX90A-NEXT: .LBB3_5: ; %bb16
; GFX90A-NEXT: ; Parent Loop BB3_2 Depth=1
; GFX90A-NEXT: ; => This Inner Loop Header: Depth=2
@@ -802,24 +805,27 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-NEXT: v_pk_add_f32 v[8:9], v[8:9], v[26:27]
; GFX90A-NEXT: v_pk_add_f32 v[10:11], v[10:11], v[16:17]
; GFX90A-NEXT: v_pk_add_f32 v[12:13], v[12:13], v[14:15]
-; GFX90A-NEXT: s_branch .LBB3_4
+; GFX90A-NEXT: s_mov_b64 s[22:23], -1
+; GFX90A-NEXT: s_branch .LBB3_8
; GFX90A-NEXT: .LBB3_7: ; in Loop: Header=BB3_5 Depth=2
; GFX90A-NEXT: s_mov_b64 s[22:23], s[18:19]
+; GFX90A-NEXT: .LBB3_8: ; %Flow
+; GFX90A-NEXT: ; in Loop: Header=BB3_5 Depth=2
; GFX90A-NEXT: s_andn2_b64 vcc, exec, s[22:23]
-; GFX90A-NEXT: s_cbranch_vccz .LBB3_4
-; GFX90A-NEXT: ; %bb.8: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: s_mov_b64 s[22:23], -1
+; GFX90A-NEXT: s_cbranch_vccz .LBB3_4
+; GFX90A-NEXT: ; %bb.9: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: ; implicit-def: $vgpr4_vgpr5
; GFX90A-NEXT: ; implicit-def: $sgpr20_sgpr21
-; GFX90A-NEXT: .LBB3_9: ; %loop.exit.guard
+; GFX90A-NEXT: .LBB3_10: ; %loop.exit.guard
; GFX90A-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: s_xor_b64 s[18:19], s[22:23], -1
-; GFX90A-NEXT: .LBB3_10: ; %Flow19
+; GFX90A-NEXT: .LBB3_11: ; %Flow19
; GFX90A-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: s_mov_b64 s[2:3], -1
; GFX90A-NEXT: s_and_b64 vcc, exec, s[18:19]
; GFX90A-NEXT: s_cbranch_vccz .LBB3_1
-; GFX90A-NEXT: ; %bb.11: ; %bb12
+; GFX90A-NEXT: ; %bb.12: ; %bb12
; GFX90A-NEXT: ; in Loop: Header=BB3_2 Depth=1
; GFX90A-NEXT: s_add_u32 s10, s10, s8
; GFX90A-NEXT: s_addc_u32 s11, s11, 0
@@ -827,7 +833,7 @@ define amdgpu_kernel void @introduced_copy_to_sgpr(i64 %arg, i32 %arg1, i32 %arg
; GFX90A-NEXT: s_addc_u32 s15, s15, s17
; GFX90A-NEXT: s_mov_b64 s[2:3], 0
; GFX90A-NEXT: s_branch .LBB3_1
-; GFX90A-NEXT: .LBB3_12: ; %DummyReturnBlock
+; GFX90A-NEXT: .LBB3_13: ; %DummyReturnBlock
; GFX90A-NEXT: s_endpgm
bb:
%i = load volatile i16, ptr addrspace(4) poison, align 2
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
index 0d5f538..e19dab1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.1024bit.ll
@@ -232,6 +232,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -252,7 +253,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -265,10 +266,13 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v31, vcc, 3, v31
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
@@ -301,16 +305,15 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v32i32_to_v32f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -331,7 +334,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -344,10 +347,13 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v31, vcc, 3, v31
; VI-NEXT: v_add_u32_e32 v30, vcc, 3, v30
; VI-NEXT: v_add_u32_e32 v29, vcc, 3, v29
@@ -380,16 +386,15 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v32i32_to_v32f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -410,7 +415,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -423,10 +428,13 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v31, 3, v31
; GFX9-NEXT: v_add_u32_e32 v30, 3, v30
; GFX9-NEXT: v_add_u32_e32 v29, 3, v29
@@ -459,44 +467,42 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v32i32_to_v32f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v31, 3, v31
; GFX11-NEXT: v_add_nc_u32_e32 v30, 3, v30
; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
@@ -529,6 +535,7 @@ define inreg <32 x float> @bitcast_v32i32_to_v32f32_scalar(<32 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -755,6 +762,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -775,7 +783,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -788,10 +796,13 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v31, 1.0, v31
; SI-NEXT: v_add_f32_e32 v30, 1.0, v30
; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -824,16 +835,15 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v32f32_to_v32i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -854,7 +864,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -867,10 +877,13 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v31, 1.0, v31
; VI-NEXT: v_add_f32_e32 v30, 1.0, v30
; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -903,16 +916,15 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v32f32_to_v32i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -933,7 +945,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -946,10 +958,13 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v31, 1.0, v31
; GFX9-NEXT: v_add_f32_e32 v30, 1.0, v30
; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -982,44 +997,42 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v32f32_to_v32i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB3_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: .LBB3_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v31, 1.0, v31 :: v_dual_add_f32 v30, 1.0, v30
; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
@@ -1036,6 +1049,7 @@ define inreg <32 x i32> @bitcast_v32f32_to_v32i32_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1278,6 +1292,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -1298,7 +1313,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -1311,10 +1326,13 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v31, vcc, 3, v31
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
@@ -1347,16 +1365,15 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v32i32_to_v16i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -1377,7 +1394,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -1390,10 +1407,13 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v31, vcc, 3, v31
; VI-NEXT: v_add_u32_e32 v30, vcc, 3, v30
; VI-NEXT: v_add_u32_e32 v29, vcc, 3, v29
@@ -1426,16 +1446,15 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v32i32_to_v16i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -1456,7 +1475,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -1469,10 +1488,13 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v31, 3, v31
; GFX9-NEXT: v_add_u32_e32 v30, 3, v30
; GFX9-NEXT: v_add_u32_e32 v29, 3, v29
@@ -1505,44 +1527,42 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v32i32_to_v16i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v31, 3, v31
; GFX11-NEXT: v_add_nc_u32_e32 v30, 3, v30
; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
@@ -1575,6 +1595,7 @@ define inreg <16 x i64> @bitcast_v32i32_to_v16i64_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1825,6 +1846,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -1845,7 +1867,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -1858,10 +1880,13 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30
; SI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
@@ -1894,16 +1919,15 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v16i64_to_v32i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -1924,7 +1948,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -1937,10 +1961,13 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v30, vcc, 3, v30
; VI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
@@ -1973,16 +2000,15 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v16i64_to_v32i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -2003,7 +2029,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -2016,10 +2042,13 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v30, vcc, 3, v30
; GFX9-NEXT: v_addc_co_u32_e32 v31, vcc, 0, v31, vcc
; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, 3, v28
@@ -2052,44 +2081,42 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v16i64_to_v32i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB7_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: .LBB7_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v30, vcc_lo, v30, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v31, null, 0, v31, vcc_lo
@@ -2130,6 +2157,7 @@ define inreg <32 x i32> @bitcast_v16i64_to_v32i32_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2372,6 +2400,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -2392,7 +2421,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -2405,10 +2434,13 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v31, vcc, 3, v31
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
@@ -2441,16 +2473,15 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v32i32_to_v16f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -2471,7 +2502,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -2484,10 +2515,13 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v31, vcc, 3, v31
; VI-NEXT: v_add_u32_e32 v30, vcc, 3, v30
; VI-NEXT: v_add_u32_e32 v29, vcc, 3, v29
@@ -2520,16 +2554,15 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v32i32_to_v16f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -2550,7 +2583,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -2563,10 +2596,13 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v31, 3, v31
; GFX9-NEXT: v_add_u32_e32 v30, 3, v30
; GFX9-NEXT: v_add_u32_e32 v29, 3, v29
@@ -2599,44 +2635,42 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v32i32_to_v16f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v31, 3, v31
; GFX11-NEXT: v_add_nc_u32_e32 v30, 3, v30
; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
@@ -2669,6 +2703,7 @@ define inreg <16 x double> @bitcast_v32i32_to_v16f64_scalar(<32 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2847,6 +2882,7 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -2877,13 +2913,16 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; SI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -2900,17 +2939,16 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: v_mov_b32_e32 v19, v33
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v16f64_to_v32i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -2941,13 +2979,16 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; VI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -2964,17 +3005,16 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: v_mov_b32_e32 v19, v33
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v16f64_to_v32i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -3005,13 +3045,16 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -3028,45 +3071,43 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: v_mov_b32_e32 v19, v33
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v16f64_to_v32i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB11_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: .LBB11_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -3083,6 +3124,7 @@ define inreg <32 x i32> @bitcast_v16f64_to_v32i32_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7592,8 +7634,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_writelane_b32 v41, s66, 18
; SI-NEXT: v_writelane_b32 v41, s67, 19
; SI-NEXT: v_writelane_b32 v41, s68, 20
-; SI-NEXT: v_writelane_b32 v41, s69, 21
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v41, s69, 21
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v41, s70, 22
; SI-NEXT: v_readfirstlane_b32 s47, v1
; SI-NEXT: v_readfirstlane_b32 s46, v2
@@ -7612,8 +7655,8 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v15
; SI-NEXT: v_readfirstlane_b32 s8, v16
; SI-NEXT: v_readfirstlane_b32 s7, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v41, s71, 23
; SI-NEXT: s_cbranch_scc0 .LBB13_4
@@ -7659,9 +7702,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24
; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16
; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8
-; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24
-; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16
-; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8
+; SI-NEXT: v_alignbit_b32 v31, s46, v22, 24
+; SI-NEXT: v_alignbit_b32 v32, s46, v22, 16
+; SI-NEXT: v_alignbit_b32 v30, s46, v22, 8
; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24
; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16
; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8
@@ -7806,9 +7849,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24
; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16
; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8
-; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24
-; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16
-; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8
+; SI-NEXT: v_alignbit_b32 v31, s46, v22, 24
+; SI-NEXT: v_alignbit_b32 v32, s46, v22, 16
+; SI-NEXT: v_alignbit_b32 v30, s46, v22, 8
; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24
; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16
; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8
@@ -8048,16 +8091,16 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v23, s4
; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen
; SI-NEXT: s_and_b32 s4, s47, 0xff
-; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v32
+; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v30
; SI-NEXT: v_or_b32_e32 v22, s4, v22
; SI-NEXT: s_and_b32 s4, s46, 0xff
; SI-NEXT: s_lshl_b32 s5, s34, 8
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v23, 0xff, v31
+; SI-NEXT: v_and_b32_e32 v23, 0xff, v32
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s31, 0xff
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v30
+; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v31
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s16, s30, 24
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
@@ -8358,18 +8401,18 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr72
; SI-NEXT: ; implicit-def: $sgpr63
; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $sgpr59
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $vgpr26
+; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $vgpr25
+; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; implicit-def: $sgpr56
+; SI-NEXT: ; implicit-def: $vgpr26
+; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr24
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr20
@@ -8392,7 +8435,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v32i32_to_v128i8_scalar:
; VI: ; %bb.0:
@@ -8450,8 +8495,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s6, v15
; VI-NEXT: v_readfirstlane_b32 s7, v16
; VI-NEXT: v_readfirstlane_b32 s4, v17
-; VI-NEXT: s_and_b64 s[46:47], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v18
+; VI-NEXT: s_and_b64 s[46:47], vcc, exec
+; VI-NEXT: s_mov_b64 vcc, -1
; VI-NEXT: v_writelane_b32 v20, s87, 31
; VI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane
; VI-NEXT: s_cbranch_scc0 .LBB13_4
@@ -9287,8 +9333,6 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: .LBB13_4:
; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: ; kill: killed $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; kill: killed $sgpr60
; VI-NEXT: ; implicit-def: $sgpr65
; VI-NEXT: ; implicit-def: $sgpr64
; VI-NEXT: ; implicit-def: $sgpr55
@@ -9430,6 +9474,8 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: ; kill: killed $sgpr60
; VI-NEXT: ; implicit-def: $sgpr60
+; VI-NEXT: ; kill: killed $sgpr60
+; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: v_writelane_b32 v21, s60, 0
; VI-NEXT: v_writelane_b32 v21, s61, 1
; VI-NEXT: ; implicit-def: $sgpr60
@@ -9441,7 +9487,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: v_writelane_b32 v21, s60, 6
; VI-NEXT: v_writelane_b32 v21, s61, 7
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, vcc
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v32i32_to_v128i8_scalar:
; GFX9: ; %bb.0:
@@ -9503,8 +9551,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s6, v15
; GFX9-NEXT: v_readfirstlane_b32 s7, v16
; GFX9-NEXT: v_readfirstlane_b32 s4, v17
-; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v18
+; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
+; GFX9-NEXT: s_mov_b64 vcc, -1
; GFX9-NEXT: v_writelane_b32 v20, s99, 35
; GFX9-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
@@ -9609,9 +9658,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_writelane_b32 v21, s46, 50
; GFX9-NEXT: s_lshr_b64 s[56:57], s[4:5], 24
; GFX9-NEXT: v_writelane_b32 v21, s56, 0
-; GFX9-NEXT: s_lshr_b32 s82, s28, 8
-; GFX9-NEXT: s_lshr_b32 s83, s27, 24
-; GFX9-NEXT: s_lshr_b32 s81, s27, 16
+; GFX9-NEXT: s_lshr_b32 s81, s28, 8
+; GFX9-NEXT: s_lshr_b32 s82, s27, 24
+; GFX9-NEXT: s_lshr_b32 s83, s27, 16
; GFX9-NEXT: s_lshr_b32 s84, s27, 8
; GFX9-NEXT: s_lshr_b32 s85, s26, 16
; GFX9-NEXT: s_lshr_b32 s86, s26, 8
@@ -9790,9 +9839,9 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s27, s27, 3
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: v_writelane_b32 v21, s56, 0
-; GFX9-NEXT: s_lshr_b32 s82, s28, 8
-; GFX9-NEXT: s_lshr_b32 s83, s27, 24
-; GFX9-NEXT: s_lshr_b32 s81, s27, 16
+; GFX9-NEXT: s_lshr_b32 s81, s28, 8
+; GFX9-NEXT: s_lshr_b32 s82, s27, 24
+; GFX9-NEXT: s_lshr_b32 s83, s27, 16
; GFX9-NEXT: s_lshr_b32 s84, s27, 8
; GFX9-NEXT: s_lshr_b32 s85, s26, 16
; GFX9-NEXT: s_lshr_b32 s86, s26, 8
@@ -9951,14 +10000,14 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: s_and_b32 s16, s27, 0xff
; GFX9-NEXT: s_lshl_b32 s17, s84, 8
; GFX9-NEXT: s_or_b32 s16, s16, s17
-; GFX9-NEXT: s_and_b32 s17, s81, 0xff
-; GFX9-NEXT: s_lshl_b32 s18, s83, 8
+; GFX9-NEXT: s_and_b32 s17, s83, 0xff
+; GFX9-NEXT: s_lshl_b32 s18, s82, 8
; GFX9-NEXT: s_or_b32 s17, s17, s18
; GFX9-NEXT: s_and_b32 s16, s16, 0xffff
; GFX9-NEXT: s_lshl_b32 s17, s17, 16
; GFX9-NEXT: s_or_b32 s16, s16, s17
; GFX9-NEXT: v_mov_b32_e32 v12, s16
-; GFX9-NEXT: s_lshl_b32 s16, s82, 8
+; GFX9-NEXT: s_lshl_b32 s16, s81, 8
; GFX9-NEXT: s_and_b32 s17, s28, 0xff
; GFX9-NEXT: v_readlane_b32 s18, v21, 50
; GFX9-NEXT: s_or_b32 s16, s17, s16
@@ -10286,13 +10335,7 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: v_writelane_b32 v21, s82, 0
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr80
; GFX9-NEXT: ; implicit-def: $sgpr71
@@ -10321,6 +10364,8 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr86
; GFX9-NEXT: ; implicit-def: $sgpr85
; GFX9-NEXT: ; implicit-def: $sgpr84
+; GFX9-NEXT: ; implicit-def: $sgpr83
+; GFX9-NEXT: ; implicit-def: $sgpr82
; GFX9-NEXT: ; implicit-def: $sgpr81
; GFX9-NEXT: ; implicit-def: $sgpr36
; GFX9-NEXT: ; implicit-def: $sgpr34
@@ -10336,15 +10381,15 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr62
; GFX9-NEXT: ; implicit-def: $sgpr60
; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: v_writelane_b32 v21, s83, 1
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr82
+; GFX9-NEXT: v_writelane_b32 v21, s56, 0
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: v_writelane_b32 v21, s57, 1
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr47
@@ -10431,7 +10476,13 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: ; kill: killed $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: ; implicit-def: $sgpr47
+; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: ; implicit-def: $sgpr47
+; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: s_andn2_b64 vcc, exec, vcc
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v32i32_to_v128i8_scalar:
; GFX11: ; %bb.0:
@@ -10470,8 +10521,8 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s5, v14
; GFX11-NEXT: v_writelane_b32 v16, s37, 5
; GFX11-NEXT: v_writelane_b32 v17, s101, 5
-; GFX11-NEXT: s_mov_b32 s101, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 vcc_lo, -1
; GFX11-NEXT: ; implicit-def: $vgpr19 : SGPR spill to VGPR lane
; GFX11-NEXT: ; implicit-def: $vgpr18 : SGPR spill to VGPR lane
; GFX11-NEXT: v_writelane_b32 v16, s38, 6
@@ -10503,835 +10554,686 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v16, s85, 29
; GFX11-NEXT: v_writelane_b32 v16, s86, 30
; GFX11-NEXT: v_writelane_b32 v16, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s43, s25, 8
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[4:5], 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 16
-; GFX11-NEXT: s_lshr_b32 s43, s24, 16
-; GFX11-NEXT: s_lshr_b32 s104, s5, 24
-; GFX11-NEXT: s_lshr_b32 s102, s5, 16
-; GFX11-NEXT: s_lshr_b32 s103, s5, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 17
-; GFX11-NEXT: s_lshr_b32 s43, s24, 8
-; GFX11-NEXT: s_lshr_b32 s57, s4, 16
-; GFX11-NEXT: s_lshr_b32 s47, s4, 8
-; GFX11-NEXT: s_lshr_b32 s46, s7, 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 18
-; GFX11-NEXT: s_lshr_b32 s43, s23, 24
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s7, 16
-; GFX11-NEXT: s_lshr_b32 s34, s7, 8
-; GFX11-NEXT: s_lshr_b32 s69, s6, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 19
-; GFX11-NEXT: s_lshr_b32 s43, s23, 16
-; GFX11-NEXT: s_lshr_b32 s56, s6, 8
-; GFX11-NEXT: s_lshr_b32 s35, s9, 24
-; GFX11-NEXT: s_lshr_b32 s36, s9, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 20
-; GFX11-NEXT: s_lshr_b32 s43, s23, 8
-; GFX11-NEXT: s_lshr_b32 s37, s9, 8
-; GFX11-NEXT: s_lshr_b32 s38, s8, 16
-; GFX11-NEXT: s_lshr_b32 s39, s8, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 21
-; GFX11-NEXT: s_lshr_b32 s43, s22, 16
-; GFX11-NEXT: s_lshr_b32 s48, s11, 24
-; GFX11-NEXT: s_lshr_b32 s49, s11, 16
-; GFX11-NEXT: s_lshr_b32 s50, s11, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 22
-; GFX11-NEXT: s_lshr_b32 s43, s22, 8
-; GFX11-NEXT: s_lshr_b32 s51, s10, 16
-; GFX11-NEXT: s_lshr_b32 s52, s10, 8
-; GFX11-NEXT: s_lshr_b32 s53, s13, 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 23
-; GFX11-NEXT: s_lshr_b32 s43, s21, 24
-; GFX11-NEXT: s_lshr_b32 s54, s13, 16
-; GFX11-NEXT: s_lshr_b32 s55, s13, 8
-; GFX11-NEXT: s_lshr_b32 s64, s12, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 24
-; GFX11-NEXT: s_lshr_b32 s43, s21, 16
-; GFX11-NEXT: s_lshr_b32 s65, s12, 8
-; GFX11-NEXT: s_lshr_b32 s66, s15, 24
-; GFX11-NEXT: s_lshr_b32 s67, s15, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 25
-; GFX11-NEXT: s_lshr_b32 s43, s21, 8
-; GFX11-NEXT: s_lshr_b32 s68, s15, 8
-; GFX11-NEXT: s_lshr_b32 s59, s14, 16
-; GFX11-NEXT: s_lshr_b32 s58, s14, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 26
-; GFX11-NEXT: s_lshr_b32 s43, s20, 16
-; GFX11-NEXT: s_lshr_b32 s70, s41, 24
-; GFX11-NEXT: s_lshr_b32 s71, s41, 16
-; GFX11-NEXT: s_lshr_b32 s60, s41, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 27
-; GFX11-NEXT: s_lshr_b32 s43, s20, 8
-; GFX11-NEXT: s_lshr_b32 s80, s40, 16
-; GFX11-NEXT: s_lshr_b32 s61, s40, 8
-; GFX11-NEXT: s_lshr_b32 s81, s29, 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 28
-; GFX11-NEXT: s_lshr_b32 s43, s19, 24
-; GFX11-NEXT: s_lshr_b32 s82, s29, 16
-; GFX11-NEXT: s_lshr_b32 s83, s29, 8
-; GFX11-NEXT: s_lshr_b32 s84, s28, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 29
-; GFX11-NEXT: s_lshr_b32 s43, s19, 16
-; GFX11-NEXT: s_lshr_b32 s85, s28, 8
-; GFX11-NEXT: s_lshr_b32 s86, s27, 24
-; GFX11-NEXT: s_lshr_b32 s72, s27, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 30
-; GFX11-NEXT: s_lshr_b32 s43, s19, 8
-; GFX11-NEXT: s_lshr_b32 s87, s27, 8
-; GFX11-NEXT: s_lshr_b32 s73, s26, 16
-; GFX11-NEXT: s_lshr_b32 s96, s26, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 31
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s97, s25, 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 0
-; GFX11-NEXT: s_lshr_b32 s43, s18, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 14
-; GFX11-NEXT: s_lshr_b32 s42, s25, 16
-; GFX11-NEXT: s_lshr_b32 s74, s2, 16
-; GFX11-NEXT: v_writelane_b32 v18, s43, 1
-; GFX11-NEXT: s_lshr_b32 s43, s17, 24
-; GFX11-NEXT: v_writelane_b32 v19, s63, 15
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[6:7], 24
-; GFX11-NEXT: s_lshr_b32 s98, s1, 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 2
-; GFX11-NEXT: s_lshr_b32 s43, s17, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 12
-; GFX11-NEXT: s_lshr_b32 s99, s1, 16
-; GFX11-NEXT: s_lshr_b32 s100, s1, 8
-; GFX11-NEXT: v_writelane_b32 v18, s43, 3
-; GFX11-NEXT: s_lshr_b32 s43, s17, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 13
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[8:9], 24
-; GFX11-NEXT: s_lshr_b32 s44, s0, 16
-; GFX11-NEXT: v_writelane_b32 v18, s43, 4
-; GFX11-NEXT: s_lshr_b32 s43, s16, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 10
-; GFX11-NEXT: s_lshr_b32 s45, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[26:27], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 5
-; GFX11-NEXT: s_lshr_b32 s43, s16, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 11
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[10:11], 24
+; GFX11-NEXT: s_lshr_b32 s42, s5, 24
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 0
+; GFX11-NEXT: s_lshr_b32 s42, s5, 16
+; GFX11-NEXT: s_lshr_b32 s34, s27, 16
+; GFX11-NEXT: s_lshr_b32 s35, s27, 8
+; GFX11-NEXT: s_lshr_b32 s36, s26, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 1
+; GFX11-NEXT: s_lshr_b32 s42, s5, 8
+; GFX11-NEXT: s_lshr_b32 s37, s26, 8
+; GFX11-NEXT: s_lshr_b32 s38, s25, 24
+; GFX11-NEXT: s_lshr_b32 s39, s25, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 2
+; GFX11-NEXT: s_lshr_b32 s42, s4, 16
+; GFX11-NEXT: s_lshr_b32 s48, s25, 8
+; GFX11-NEXT: s_lshr_b32 s49, s24, 16
+; GFX11-NEXT: s_lshr_b32 s50, s24, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s4, 8
+; GFX11-NEXT: s_lshr_b32 s51, s23, 24
+; GFX11-NEXT: s_lshr_b32 s52, s23, 16
+; GFX11-NEXT: s_lshr_b32 s53, s23, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 4
+; GFX11-NEXT: s_lshr_b32 s42, s7, 24
+; GFX11-NEXT: s_lshr_b32 s54, s22, 16
+; GFX11-NEXT: s_lshr_b32 s55, s22, 8
+; GFX11-NEXT: s_lshr_b32 s64, s21, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s7, 16
+; GFX11-NEXT: s_lshr_b32 s65, s21, 16
+; GFX11-NEXT: s_lshr_b32 s66, s21, 8
+; GFX11-NEXT: s_lshr_b32 s67, s20, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 6
+; GFX11-NEXT: s_lshr_b32 s42, s7, 8
+; GFX11-NEXT: s_lshr_b32 s68, s20, 8
+; GFX11-NEXT: s_lshr_b32 s69, s19, 24
+; GFX11-NEXT: s_lshr_b32 s70, s19, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 7
+; GFX11-NEXT: s_lshr_b32 s42, s6, 16
+; GFX11-NEXT: s_lshr_b32 s71, s19, 8
+; GFX11-NEXT: s_lshr_b32 s80, s18, 16
+; GFX11-NEXT: s_lshr_b32 s81, s18, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 8
+; GFX11-NEXT: s_lshr_b32 s42, s6, 8
+; GFX11-NEXT: s_lshr_b32 s82, s17, 24
+; GFX11-NEXT: s_lshr_b32 s83, s17, 16
+; GFX11-NEXT: s_lshr_b32 s84, s17, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 9
+; GFX11-NEXT: s_lshr_b32 s42, s9, 24
+; GFX11-NEXT: s_lshr_b32 s85, s16, 16
+; GFX11-NEXT: s_lshr_b32 s86, s16, 8
+; GFX11-NEXT: s_lshr_b32 s87, s3, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 10
+; GFX11-NEXT: s_lshr_b32 s42, s9, 16
+; GFX11-NEXT: s_lshr_b32 s96, s3, 16
+; GFX11-NEXT: s_lshr_b32 s97, s3, 8
+; GFX11-NEXT: s_lshr_b32 s98, s2, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 11
+; GFX11-NEXT: s_lshr_b32 s42, s9, 8
+; GFX11-NEXT: s_lshr_b32 s99, s2, 8
+; GFX11-NEXT: s_lshr_b32 s100, s1, 24
+; GFX11-NEXT: s_lshr_b32 s101, s1, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 12
+; GFX11-NEXT: s_lshr_b32 s42, s8, 16
+; GFX11-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s8, 8
+; GFX11-NEXT: s_lshr_b64 s[44:45], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[46:47], s[8:9], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 14
+; GFX11-NEXT: s_lshr_b32 s42, s11, 24
+; GFX11-NEXT: s_lshr_b64 s[56:57], s[10:11], 24
+; GFX11-NEXT: s_lshr_b64 s[58:59], s[12:13], 24
+; GFX11-NEXT: s_lshr_b64 s[60:61], s[14:15], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s11, 16
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
+; GFX11-NEXT: s_lshr_b64 s[72:73], s[28:29], 24
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[26:27], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 16
+; GFX11-NEXT: s_lshr_b32 s42, s11, 8
; GFX11-NEXT: s_lshr_b64 s[88:89], s[24:25], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 6
-; GFX11-NEXT: s_lshr_b32 s43, s3, 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 8
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[20:21], 24
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[22:23], 24
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[20:21], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 17
+; GFX11-NEXT: s_lshr_b32 s42, s10, 16
; GFX11-NEXT: s_lshr_b64 s[90:91], s[18:19], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 7
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: v_writelane_b32 v19, s63, 9
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[12:13], 24
; GFX11-NEXT: s_lshr_b64 s[92:93], s[16:17], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 8
-; GFX11-NEXT: s_lshr_b32 s43, s3, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 6
; GFX11-NEXT: s_lshr_b64 s[94:95], s[2:3], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 18
+; GFX11-NEXT: s_lshr_b32 s42, s10, 8
; GFX11-NEXT: s_lshr_b64 s[30:31], s[0:1], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 9
-; GFX11-NEXT: s_lshr_b32 s43, s2, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 7
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[14:15], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v19, s62, 4
-; GFX11-NEXT: v_writelane_b32 v19, s63, 5
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 2
-; GFX11-NEXT: v_writelane_b32 v19, s63, 3
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[28:29], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v19, s62, 0
-; GFX11-NEXT: v_writelane_b32 v19, s63, 1
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[22:23], 24
-; GFX11-NEXT: s_branch .LBB13_3
-; GFX11-NEXT: .LBB13_2:
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: s_mov_b32 s101, -1
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 0
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 1
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 2
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 3
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 4
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 5
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 6
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 7
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 8
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 9
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 10
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 11
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 12
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 13
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 14
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 15
-; GFX11-NEXT: .LBB13_3: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s101
-; GFX11-NEXT: s_mov_b32 s101, s104
-; GFX11-NEXT: s_mov_b32 s104, s57
-; GFX11-NEXT: s_mov_b32 s57, s69
-; GFX11-NEXT: s_mov_b32 s69, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_5
-; GFX11-NEXT: ; %bb.4: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s25, s25, 3
-; GFX11-NEXT: s_add_i32 s24, s24, 3
-; GFX11-NEXT: s_lshr_b32 s42, s25, 8
-; GFX11-NEXT: s_add_i32 s23, s23, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 16
-; GFX11-NEXT: s_lshr_b32 s42, s24, 16
-; GFX11-NEXT: s_add_i32 s22, s22, 3
-; GFX11-NEXT: s_add_i32 s21, s21, 3
-; GFX11-NEXT: s_add_i32 s20, s20, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 17
-; GFX11-NEXT: s_lshr_b32 s42, s24, 8
-; GFX11-NEXT: s_add_i32 s19, s19, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 19
+; GFX11-NEXT: s_lshr_b32 s42, s13, 24
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 20
+; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 21
+; GFX11-NEXT: s_lshr_b32 s42, s13, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 22
+; GFX11-NEXT: s_lshr_b32 s42, s12, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 23
+; GFX11-NEXT: s_lshr_b32 s42, s12, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 24
+; GFX11-NEXT: s_lshr_b32 s42, s15, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 25
+; GFX11-NEXT: s_lshr_b32 s42, s15, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 26
+; GFX11-NEXT: s_lshr_b32 s42, s15, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 27
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 28
+; GFX11-NEXT: s_lshr_b32 s42, s14, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 29
+; GFX11-NEXT: s_lshr_b32 s42, s41, 24
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 30
+; GFX11-NEXT: s_lshr_b32 s42, s41, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 31
+; GFX11-NEXT: s_lshr_b32 s42, s41, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 0
+; GFX11-NEXT: s_lshr_b32 s42, s40, 16
+; GFX11-NEXT: v_writelane_b32 v18, s42, 1
+; GFX11-NEXT: s_lshr_b32 s42, s40, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 2
+; GFX11-NEXT: s_lshr_b32 s42, s29, 24
+; GFX11-NEXT: v_writelane_b32 v18, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s29, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 4
+; GFX11-NEXT: s_lshr_b32 s42, s29, 8
+; GFX11-NEXT: v_writelane_b32 v18, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 6
+; GFX11-NEXT: s_lshr_b32 s42, s28, 8
+; GFX11-NEXT: v_writelane_b32 v18, s42, 7
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[4:5], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
+; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s5, s5, 3
; GFX11-NEXT: s_add_i32 s4, s4, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 18
-; GFX11-NEXT: s_lshr_b32 s42, s23, 24
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[4:5], 24
+; GFX11-NEXT: s_lshr_b32 s42, s5, 24
; GFX11-NEXT: s_add_i32 s7, s7, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 0
+; GFX11-NEXT: s_lshr_b32 s42, s5, 16
; GFX11-NEXT: s_add_i32 s6, s6, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 19
-; GFX11-NEXT: s_lshr_b32 s42, s23, 16
; GFX11-NEXT: s_add_i32 s9, s9, 3
; GFX11-NEXT: s_add_i32 s8, s8, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 1
+; GFX11-NEXT: s_lshr_b32 s42, s5, 8
; GFX11-NEXT: s_add_i32 s11, s11, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 20
-; GFX11-NEXT: s_lshr_b32 s42, s23, 8
; GFX11-NEXT: s_add_i32 s10, s10, 3
-; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s13, s13, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 21
-; GFX11-NEXT: s_lshr_b32 s42, s22, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 2
+; GFX11-NEXT: s_lshr_b32 s42, s4, 16
; GFX11-NEXT: s_add_i32 s12, s12, 3
-; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s15, s15, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 22
-; GFX11-NEXT: s_lshr_b32 s42, s22, 8
; GFX11-NEXT: s_add_i32 s14, s14, 3
-; GFX11-NEXT: s_add_i32 s16, s16, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s4, 8
; GFX11-NEXT: s_add_i32 s41, s41, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 23
-; GFX11-NEXT: s_lshr_b32 s42, s21, 24
; GFX11-NEXT: s_add_i32 s40, s40, 3
-; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s29, s29, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 24
-; GFX11-NEXT: s_lshr_b32 s42, s21, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 4
+; GFX11-NEXT: s_lshr_b32 s42, s7, 24
; GFX11-NEXT: s_add_i32 s28, s28, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: v_writelane_b32 v19, s42, 25
-; GFX11-NEXT: s_lshr_b32 s42, s21, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s7, 16
+; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
+; GFX11-NEXT: s_add_i32 s17, s17, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 6
+; GFX11-NEXT: s_lshr_b32 s42, s7, 8
+; GFX11-NEXT: s_add_i32 s16, s16, 3
+; GFX11-NEXT: s_add_i32 s19, s19, 3
+; GFX11-NEXT: s_add_i32 s18, s18, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 7
+; GFX11-NEXT: s_lshr_b32 s42, s6, 16
+; GFX11-NEXT: s_add_i32 s21, s21, 3
+; GFX11-NEXT: s_add_i32 s20, s20, 3
+; GFX11-NEXT: s_add_i32 s23, s23, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 8
+; GFX11-NEXT: s_lshr_b32 s42, s6, 8
+; GFX11-NEXT: s_add_i32 s22, s22, 3
+; GFX11-NEXT: s_add_i32 s25, s25, 3
+; GFX11-NEXT: s_add_i32 s24, s24, 3
+; GFX11-NEXT: v_writelane_b32 v19, s42, 9
+; GFX11-NEXT: s_lshr_b32 s42, s9, 24
; GFX11-NEXT: s_add_i32 s27, s27, 3
; GFX11-NEXT: s_add_i32 s26, s26, 3
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 10
+; GFX11-NEXT: s_lshr_b32 s42, s9, 16
+; GFX11-NEXT: s_lshr_b32 s34, s27, 16
+; GFX11-NEXT: s_lshr_b32 s35, s27, 8
+; GFX11-NEXT: s_lshr_b32 s36, s26, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 11
+; GFX11-NEXT: s_lshr_b32 s42, s9, 8
+; GFX11-NEXT: s_lshr_b32 s37, s26, 8
+; GFX11-NEXT: s_lshr_b32 s38, s25, 24
+; GFX11-NEXT: s_lshr_b32 s39, s25, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 12
+; GFX11-NEXT: s_lshr_b32 s42, s8, 16
+; GFX11-NEXT: s_lshr_b32 s48, s25, 8
+; GFX11-NEXT: s_lshr_b32 s49, s24, 16
+; GFX11-NEXT: s_lshr_b32 s50, s24, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s8, 8
+; GFX11-NEXT: s_lshr_b32 s51, s23, 24
+; GFX11-NEXT: s_lshr_b32 s52, s23, 16
+; GFX11-NEXT: s_lshr_b32 s53, s23, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 14
+; GFX11-NEXT: s_lshr_b32 s42, s11, 24
+; GFX11-NEXT: s_lshr_b32 s54, s22, 16
+; GFX11-NEXT: s_lshr_b32 s55, s22, 8
+; GFX11-NEXT: s_lshr_b32 s64, s21, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s11, 16
+; GFX11-NEXT: s_lshr_b32 s65, s21, 16
+; GFX11-NEXT: s_lshr_b32 s66, s21, 8
+; GFX11-NEXT: s_lshr_b32 s67, s20, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 16
+; GFX11-NEXT: s_lshr_b32 s42, s11, 8
+; GFX11-NEXT: s_lshr_b32 s68, s20, 8
+; GFX11-NEXT: s_lshr_b32 s69, s19, 24
+; GFX11-NEXT: s_lshr_b32 s70, s19, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 17
+; GFX11-NEXT: s_lshr_b32 s42, s10, 16
+; GFX11-NEXT: s_lshr_b32 s71, s19, 8
+; GFX11-NEXT: s_lshr_b32 s80, s18, 16
+; GFX11-NEXT: s_lshr_b32 s81, s18, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 18
+; GFX11-NEXT: s_lshr_b32 s42, s10, 8
+; GFX11-NEXT: s_lshr_b32 s82, s17, 24
+; GFX11-NEXT: s_lshr_b32 s83, s17, 16
+; GFX11-NEXT: s_lshr_b32 s84, s17, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 19
+; GFX11-NEXT: s_lshr_b32 s42, s13, 24
+; GFX11-NEXT: s_lshr_b32 s85, s16, 16
+; GFX11-NEXT: s_lshr_b32 s86, s16, 8
+; GFX11-NEXT: s_lshr_b32 s87, s3, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 20
+; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s96, s3, 16
+; GFX11-NEXT: s_lshr_b32 s97, s3, 8
+; GFX11-NEXT: s_lshr_b32 s98, s2, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 21
+; GFX11-NEXT: s_lshr_b32 s42, s13, 8
+; GFX11-NEXT: s_lshr_b32 s99, s2, 8
+; GFX11-NEXT: s_lshr_b32 s100, s1, 24
+; GFX11-NEXT: s_lshr_b32 s101, s1, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 22
+; GFX11-NEXT: s_lshr_b32 s42, s12, 16
+; GFX11-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 23
+; GFX11-NEXT: s_lshr_b32 s42, s12, 8
+; GFX11-NEXT: s_lshr_b64 s[44:45], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[46:47], s[8:9], 24
+; GFX11-NEXT: s_lshr_b64 s[56:57], s[10:11], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 24
+; GFX11-NEXT: s_lshr_b32 s42, s15, 24
+; GFX11-NEXT: s_lshr_b64 s[58:59], s[12:13], 24
+; GFX11-NEXT: s_lshr_b64 s[60:61], s[14:15], 24
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 25
+; GFX11-NEXT: s_lshr_b32 s42, s15, 16
+; GFX11-NEXT: s_lshr_b64 s[72:73], s[28:29], 24
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[26:27], 24
+; GFX11-NEXT: s_lshr_b64 s[88:89], s[24:25], 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 26
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s101, s5, 24
-; GFX11-NEXT: s_lshr_b32 s102, s5, 16
-; GFX11-NEXT: s_lshr_b32 s103, s5, 8
+; GFX11-NEXT: s_lshr_b32 s42, s15, 8
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[22:23], 24
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[20:21], 24
+; GFX11-NEXT: s_lshr_b64 s[90:91], s[18:19], 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 27
-; GFX11-NEXT: s_lshr_b32 s42, s20, 8
-; GFX11-NEXT: s_lshr_b32 s104, s4, 16
-; GFX11-NEXT: s_lshr_b32 s47, s4, 8
-; GFX11-NEXT: s_lshr_b32 s46, s7, 24
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
+; GFX11-NEXT: s_lshr_b64 s[92:93], s[16:17], 24
+; GFX11-NEXT: s_lshr_b64 s[94:95], s[2:3], 24
+; GFX11-NEXT: s_lshr_b64 s[30:31], s[0:1], 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 28
-; GFX11-NEXT: s_lshr_b32 s42, s19, 24
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s7, 16
-; GFX11-NEXT: s_lshr_b32 s34, s7, 8
-; GFX11-NEXT: s_lshr_b32 s57, s6, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 29
-; GFX11-NEXT: s_lshr_b32 s42, s19, 16
-; GFX11-NEXT: s_lshr_b32 s56, s6, 8
-; GFX11-NEXT: s_lshr_b32 s35, s9, 24
-; GFX11-NEXT: s_lshr_b32 s36, s9, 16
+; GFX11-NEXT: s_lshr_b32 s42, s41, 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 30
-; GFX11-NEXT: s_lshr_b32 s42, s19, 8
-; GFX11-NEXT: s_lshr_b32 s37, s9, 8
-; GFX11-NEXT: s_lshr_b32 s38, s8, 16
-; GFX11-NEXT: s_lshr_b32 s39, s8, 8
+; GFX11-NEXT: s_lshr_b32 s42, s41, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 31
-; GFX11-NEXT: s_lshr_b32 s42, s18, 16
-; GFX11-NEXT: s_lshr_b32 s48, s11, 24
+; GFX11-NEXT: s_lshr_b32 s42, s41, 8
; GFX11-NEXT: v_writelane_b32 v18, s42, 0
-; GFX11-NEXT: s_lshr_b32 s42, s18, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 14
-; GFX11-NEXT: s_lshr_b32 s49, s11, 16
-; GFX11-NEXT: s_lshr_b32 s50, s11, 8
+; GFX11-NEXT: s_lshr_b32 s42, s40, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v18, s42, 1
-; GFX11-NEXT: s_lshr_b32 s42, s17, 24
-; GFX11-NEXT: v_writelane_b32 v19, s63, 15
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[6:7], 24
-; GFX11-NEXT: s_lshr_b32 s51, s10, 16
+; GFX11-NEXT: s_lshr_b32 s42, s40, 8
; GFX11-NEXT: v_writelane_b32 v18, s42, 2
-; GFX11-NEXT: s_lshr_b32 s42, s17, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 12
-; GFX11-NEXT: s_lshr_b32 s52, s10, 8
-; GFX11-NEXT: s_lshr_b32 s53, s13, 24
+; GFX11-NEXT: s_lshr_b32 s42, s29, 24
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v18, s42, 3
-; GFX11-NEXT: s_lshr_b32 s42, s17, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 13
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[8:9], 24
-; GFX11-NEXT: s_lshr_b32 s54, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s29, 16
; GFX11-NEXT: v_writelane_b32 v18, s42, 4
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 10
-; GFX11-NEXT: s_lshr_b32 s55, s13, 8
-; GFX11-NEXT: s_lshr_b32 s64, s12, 16
+; GFX11-NEXT: s_lshr_b32 s42, s29, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v18, s42, 5
-; GFX11-NEXT: s_lshr_b32 s42, s16, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 11
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[10:11], 24
-; GFX11-NEXT: s_lshr_b32 s65, s12, 8
+; GFX11-NEXT: s_lshr_b32 s42, s28, 16
; GFX11-NEXT: v_writelane_b32 v18, s42, 6
-; GFX11-NEXT: s_lshr_b32 s42, s3, 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 8
-; GFX11-NEXT: s_lshr_b32 s66, s15, 24
-; GFX11-NEXT: s_lshr_b32 s67, s15, 16
+; GFX11-NEXT: s_lshr_b32 s42, s28, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v18, s42, 7
-; GFX11-NEXT: s_lshr_b32 s42, s3, 16
-; GFX11-NEXT: v_writelane_b32 v19, s63, 9
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[12:13], 24
-; GFX11-NEXT: s_lshr_b32 s68, s15, 8
-; GFX11-NEXT: v_writelane_b32 v18, s42, 8
-; GFX11-NEXT: s_lshr_b32 s59, s14, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 6
-; GFX11-NEXT: s_lshr_b32 s58, s14, 8
-; GFX11-NEXT: s_lshr_b32 s70, s41, 24
-; GFX11-NEXT: s_lshr_b32 s71, s41, 16
-; GFX11-NEXT: s_lshr_b32 s60, s41, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 7
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[14:15], 24
-; GFX11-NEXT: s_lshr_b32 s80, s40, 16
-; GFX11-NEXT: s_lshr_b32 s61, s40, 8
-; GFX11-NEXT: s_lshr_b32 s81, s29, 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 4
-; GFX11-NEXT: s_lshr_b32 s82, s29, 16
-; GFX11-NEXT: s_lshr_b32 s83, s29, 8
-; GFX11-NEXT: s_lshr_b32 s84, s28, 16
-; GFX11-NEXT: s_lshr_b32 s85, s28, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 5
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
-; GFX11-NEXT: s_lshr_b32 s86, s27, 24
-; GFX11-NEXT: s_lshr_b32 s72, s27, 16
-; GFX11-NEXT: s_lshr_b32 s87, s27, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 2
-; GFX11-NEXT: s_lshr_b32 s73, s26, 16
-; GFX11-NEXT: s_lshr_b32 s96, s26, 8
-; GFX11-NEXT: s_lshr_b32 s97, s25, 24
-; GFX11-NEXT: s_lshr_b32 s69, s25, 16
-; GFX11-NEXT: v_writelane_b32 v19, s63, 3
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[28:29], 24
-; GFX11-NEXT: s_lshr_b32 s42, s3, 8
-; GFX11-NEXT: s_lshr_b32 s74, s2, 16
-; GFX11-NEXT: s_lshr_b32 s43, s2, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 0
-; GFX11-NEXT: s_lshr_b32 s98, s1, 24
-; GFX11-NEXT: s_lshr_b32 s99, s1, 16
-; GFX11-NEXT: s_lshr_b32 s100, s1, 8
-; GFX11-NEXT: s_lshr_b32 s44, s0, 16
-; GFX11-NEXT: s_lshr_b32 s45, s0, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 1
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[26:27], 24
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[24:25], 24
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[22:23], 24
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[20:21], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[18:19], 24
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[0:1], 24
-; GFX11-NEXT: v_writelane_b32 v18, s42, 9
-; GFX11-NEXT: .LBB13_5: ; %end
-; GFX11-NEXT: s_lshl_b32 s43, s43, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_and_b32 s42, s74, 0xff
-; GFX11-NEXT: s_or_b32 s2, s2, s43
-; GFX11-NEXT: s_lshl_b32 s43, s94, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_or_b32 s42, s42, s43
-; GFX11-NEXT: s_lshl_b32 s45, s45, 8
-; GFX11-NEXT: s_lshl_b32 s42, s42, 16
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[4:5], 24
+; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: s_lshl_b32 s43, s104, 8
; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_or_b32 s2, s2, s42
-; GFX11-NEXT: v_readlane_b32 s42, v18, 9
-; GFX11-NEXT: s_or_b32 s0, s0, s45
-; GFX11-NEXT: s_lshl_b32 s45, s30, 8
-; GFX11-NEXT: s_and_b32 s44, s44, 0xff
-; GFX11-NEXT: s_and_b32 s3, s3, 0xff
-; GFX11-NEXT: s_or_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s42, s42, 8
+; GFX11-NEXT: s_and_b32 s45, s103, 0xff
+; GFX11-NEXT: s_or_b32 s0, s0, s43
+; GFX11-NEXT: s_lshl_b32 s43, s30, 8
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s44, s44, 16
-; GFX11-NEXT: s_or_b32 s3, s3, s42
-; GFX11-NEXT: v_readlane_b32 s42, v18, 8
-; GFX11-NEXT: v_readlane_b32 s43, v18, 7
-; GFX11-NEXT: s_or_b32 s0, s0, s44
+; GFX11-NEXT: s_or_b32 s43, s45, s43
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
-; GFX11-NEXT: s_lshl_b32 s44, s100, 8
-; GFX11-NEXT: s_lshl_b32 s45, s98, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s44
-; GFX11-NEXT: s_and_b32 s44, s99, 0xff
-; GFX11-NEXT: s_and_b32 s42, s42, 0xff
-; GFX11-NEXT: s_or_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s43, s43, 8
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
+; GFX11-NEXT: s_lshl_b32 s45, s100, 8
+; GFX11-NEXT: s_or_b32 s0, s0, s43
+; GFX11-NEXT: s_lshl_b32 s43, s102, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-NEXT: s_or_b32 s1, s1, s43
+; GFX11-NEXT: s_and_b32 s43, s101, 0xff
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: s_lshl_b32 s44, s44, 16
-; GFX11-NEXT: s_or_b32 s42, s42, s43
-; GFX11-NEXT: s_or_b32 s1, s1, s44
-; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
-; GFX11-NEXT: s_lshl_b32 s42, s42, 16
+; GFX11-NEXT: s_or_b32 s43, s43, s45
+; GFX11-NEXT: s_and_b32 s45, s98, 0xff
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
+; GFX11-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-NEXT: s_or_b32 s1, s1, s43
+; GFX11-NEXT: s_lshl_b32 s43, s99, 8
; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
-; GFX11-NEXT: v_readlane_b32 s0, v18, 6
-; GFX11-NEXT: s_or_b32 s3, s3, s42
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
-; GFX11-NEXT: v_readlane_b32 s2, v18, 5
-; GFX11-NEXT: s_lshl_b32 s0, s0, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s43
+; GFX11-NEXT: s_lshl_b32 s43, s94, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-NEXT: s_or_b32 s43, s45, s43
+; GFX11-NEXT: s_lshl_b32 s45, s87, 8
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
+; GFX11-NEXT: s_lshl_b32 s0, s86, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s43
+; GFX11-NEXT: s_lshl_b32 s43, s97, 8
; GFX11-NEXT: s_and_b32 s1, s16, 0xff
-; GFX11-NEXT: v_readlane_b32 s3, v18, 2
+; GFX11-NEXT: s_or_b32 s3, s3, s43
+; GFX11-NEXT: s_and_b32 s43, s96, 0xff
+; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX11-NEXT: s_or_b32 s43, s43, s45
; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
; GFX11-NEXT: s_lshl_b32 s1, s92, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-NEXT: s_or_b32 s3, s3, s43
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-NEXT: s_and_b32 s2, s85, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_or_b32 s1, s2, s1
-; GFX11-NEXT: v_readlane_b32 s2, v18, 4
+; GFX11-NEXT: s_lshl_b32 s2, s84, 8
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s3, s82, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_and_b32 s1, s17, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: v_readlane_b32 s16, v18, 0
+; GFX11-NEXT: s_and_b32 s16, s80, 0xff
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v18, 3
+; GFX11-NEXT: s_and_b32 s2, s83, 0xff
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: v_readlane_b32 s17, v19, 29
-; GFX11-NEXT: s_and_b32 s16, s16, 0xff
-; GFX11-NEXT: v_readlane_b32 s100, v17, 4
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: v_readlane_b32 s99, v17, 3
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_and_b32 s3, s18, 0xff
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: s_lshl_b32 s17, s17, 8
+; GFX11-NEXT: s_lshl_b32 s17, s69, 8
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v18, 1
+; GFX11-NEXT: s_lshl_b32 s2, s81, 8
; GFX11-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v6, s1
-; GFX11-NEXT: v_readlane_b32 s0, v19, 28
-; GFX11-NEXT: s_and_b32 s1, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: v_readlane_b32 s18, v19, 19
; GFX11-NEXT: s_or_b32 s2, s3, s2
; GFX11-NEXT: s_lshl_b32 s3, s90, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_or_b32 s3, s16, s3
-; GFX11-NEXT: v_readlane_b32 s16, v19, 31
+; GFX11-NEXT: s_lshl_b32 s16, s71, 8
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_lshl_b32 s0, s0, 8
+; GFX11-NEXT: s_lshl_b32 s0, s68, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_and_b32 s3, s19, 0xff
-; GFX11-NEXT: s_lshl_b32 s16, s16, 8
-; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_b32 s1, s20, 0xff
; GFX11-NEXT: s_or_b32 s3, s3, s16
-; GFX11-NEXT: v_readlane_b32 s16, v19, 30
+; GFX11-NEXT: s_and_b32 s16, s70, 0xff
; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s78, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s18, s18, 8
-; GFX11-NEXT: s_and_b32 s16, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s19, s86, 8
; GFX11-NEXT: s_or_b32 s16, s16, s17
-; GFX11-NEXT: v_readlane_b32 s17, v19, 21
+; GFX11-NEXT: s_or_b32 s0, s1, s0
; GFX11-NEXT: s_lshl_b32 s16, s16, 16
-; GFX11-NEXT: v_readlane_b32 s98, v17, 2
+; GFX11-NEXT: s_lshl_b32 s1, s76, 8
; GFX11-NEXT: s_or_b32 s3, s3, s16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
-; GFX11-NEXT: v_readlane_b32 s2, v19, 27
-; GFX11-NEXT: v_readlane_b32 s3, v19, 24
-; GFX11-NEXT: v_readlane_b32 s16, v19, 22
-; GFX11-NEXT: s_lshl_b32 s17, s17, 8
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off
-; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s2, s67, 0xff
+; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_or_b32 s1, s2, s1
-; GFX11-NEXT: v_readlane_b32 s2, v19, 26
+; GFX11-NEXT: s_lshl_b32 s2, s66, 8
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s16, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s3, s64, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_and_b32 s1, s21, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: v_readlane_b32 s86, v16, 30
+; GFX11-NEXT: s_and_b32 s16, s54, 0xff
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v19, 25
+; GFX11-NEXT: s_and_b32 s2, s65, 0xff
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: v_readlane_b32 s31, v16, 1
-; GFX11-NEXT: v_readlane_b32 s30, v16, 0
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_and_b32 s3, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-NEXT: s_lshl_b32 s17, s53, 8
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v19, 23
-; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
-; GFX11-NEXT: v_readlane_b32 s1, v19, 18
-; GFX11-NEXT: s_and_b32 s0, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_lshl_b32 s2, s55, 8
+; GFX11-NEXT: s_lshl_b32 s18, s51, 8
; GFX11-NEXT: s_or_b32 s2, s3, s2
-; GFX11-NEXT: s_lshl_b32 s3, s62, 8
+; GFX11-NEXT: s_lshl_b32 s3, s74, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_or_b32 s3, s16, s3
; GFX11-NEXT: s_and_b32 s16, s23, 0xff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s16, s16, s17
-; GFX11-NEXT: v_readlane_b32 s17, v19, 20
+; GFX11-NEXT: s_and_b32 s17, s52, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_and_b32 s3, s16, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s17, s17, 0xff
-; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s17, s17, s18
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-NEXT: s_and_b32 s3, s16, 0xffff
; GFX11-NEXT: s_lshl_b32 s16, s17, 16
-; GFX11-NEXT: s_lshl_b32 s17, s97, 8
+; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
; GFX11-NEXT: s_or_b32 s3, s3, s16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v11, s2 :: v_dual_mov_b32 v12, s3
-; GFX11-NEXT: v_readlane_b32 s2, v19, 17
+; GFX11-NEXT: s_and_b32 s0, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s1, s50, 8
+; GFX11-NEXT: s_and_b32 s2, s49, 0xff
; GFX11-NEXT: s_lshl_b32 s3, s88, 8
-; GFX11-NEXT: s_and_b32 s16, s69, 0xff
-; GFX11-NEXT: s_and_b32 s18, s72, 0xff
-; GFX11-NEXT: v_readlane_b32 s97, v17, 1
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: v_readlane_b32 s69, v16, 21
+; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: v_readlane_b32 s3, v19, 16
; GFX11-NEXT: s_and_b32 s2, s25, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s3, s48, 8
+; GFX11-NEXT: s_and_b32 s16, s39, 0xff
+; GFX11-NEXT: s_lshl_b32 s17, s38, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
+; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s16, s73, 0xff
+; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s96, 8
-; GFX11-NEXT: s_lshl_b32 s17, s76, 8
+; GFX11-NEXT: s_lshl_b32 s3, s37, 8
+; GFX11-NEXT: s_and_b32 s16, s36, 0xff
+; GFX11-NEXT: s_lshl_b32 s17, s78, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
; GFX11-NEXT: s_and_b32 s16, s27, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s87, 8
+; GFX11-NEXT: s_lshl_b32 s17, s35, 8
+; GFX11-NEXT: s_and_b32 s18, s34, 0xff
+; GFX11-NEXT: s_lshl_b32 s19, vcc_hi, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s16, s16, s17
; GFX11-NEXT: s_or_b32 s17, s18, s19
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_and_b32 s16, s16, 0xffff
; GFX11-NEXT: s_lshl_b32 s17, s17, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off
+; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
; GFX11-NEXT: s_or_b32 s3, s16, s17
-; GFX11-NEXT: v_readlane_b32 s16, v19, 0
; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-NEXT: v_readlane_b32 s1, v18, 7
+; GFX11-NEXT: v_readlane_b32 s2, v18, 6
; GFX11-NEXT: s_and_b32 s0, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s85, 8
-; GFX11-NEXT: s_and_b32 s2, s84, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s16, 8
-; GFX11-NEXT: v_readlane_b32 s17, v19, 1
+; GFX11-NEXT: s_lshl_b32 s3, s72, 8
+; GFX11-NEXT: v_readlane_b32 s16, v18, 4
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
+; GFX11-NEXT: v_readlane_b32 s3, v18, 5
+; GFX11-NEXT: v_readlane_b32 s17, v18, 3
; GFX11-NEXT: s_and_b32 s2, s29, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s83, 8
-; GFX11-NEXT: s_and_b32 s16, s82, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s81, 8
-; GFX11-NEXT: v_readlane_b32 s18, v19, 2
+; GFX11-NEXT: s_and_b32 s16, s16, 0xff
+; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s17, s17, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
+; GFX11-NEXT: v_readlane_b32 s3, v18, 2
+; GFX11-NEXT: v_readlane_b32 s16, v18, 1
; GFX11-NEXT: s_and_b32 s2, s40, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s61, 8
-; GFX11-NEXT: s_and_b32 s16, s80, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s18, 8
-; GFX11-NEXT: v_readlane_b32 s19, v19, 3
+; GFX11-NEXT: s_lshl_b32 s17, s62, 8
+; GFX11-NEXT: v_readlane_b32 s18, v19, 31
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s16, s16, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
+; GFX11-NEXT: v_readlane_b32 s17, v18, 0
+; GFX11-NEXT: v_readlane_b32 s19, v19, 30
; GFX11-NEXT: s_and_b32 s16, s41, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s60, 8
-; GFX11-NEXT: s_and_b32 s18, s71, 0xff
-; GFX11-NEXT: s_lshl_b32 s19, s70, 8
-; GFX11-NEXT: s_or_b32 s16, s16, s17
-; GFX11-NEXT: s_or_b32 s17, s18, s19
+; GFX11-NEXT: s_and_b32 s18, s18, 0xff
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-NEXT: s_lshl_b32 s17, s17, 8
+; GFX11-NEXT: s_lshl_b32 s19, s19, 8
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_or_b32 s16, s16, s17
+; GFX11-NEXT: s_or_b32 s17, s18, s19
; GFX11-NEXT: s_and_b32 s16, s16, 0xffff
; GFX11-NEXT: s_lshl_b32 s17, s17, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
-; GFX11-NEXT: v_readlane_b32 s16, v19, 4
; GFX11-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v6, s1
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
+; GFX11-NEXT: v_readlane_b32 s1, v19, 29
+; GFX11-NEXT: v_readlane_b32 s2, v19, 28
; GFX11-NEXT: s_and_b32 s0, s14, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s58, 8
-; GFX11-NEXT: s_and_b32 s2, s59, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s16, 8
+; GFX11-NEXT: s_lshl_b32 s3, s60, 8
+; GFX11-NEXT: v_readlane_b32 s14, v19, 26
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s15, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s68, 8
-; GFX11-NEXT: s_and_b32 s14, s67, 0xff
-; GFX11-NEXT: s_lshl_b32 s15, s66, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s14, s15
-; GFX11-NEXT: v_readlane_b32 s14, v19, 6
+; GFX11-NEXT: v_readlane_b32 s3, v19, 27
+; GFX11-NEXT: v_readlane_b32 s15, v19, 25
+; GFX11-NEXT: s_and_b32 s14, s14, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s15, s15, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_or_b32 s3, s14, s15
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s12, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s65, 8
-; GFX11-NEXT: s_and_b32 s12, s64, 0xff
-; GFX11-NEXT: s_lshl_b32 s14, s14, 8
-; GFX11-NEXT: v_readlane_b32 s15, v19, 7
+; GFX11-NEXT: v_readlane_b32 s3, v19, 24
+; GFX11-NEXT: v_readlane_b32 s12, v19, 23
+; GFX11-NEXT: s_lshl_b32 s14, s58, 8
+; GFX11-NEXT: v_readlane_b32 s15, v19, 20
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:32
+; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s12, s12, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s12, s14
; GFX11-NEXT: s_and_b32 s12, s13, 0xff
-; GFX11-NEXT: s_lshl_b32 s13, s55, 8
-; GFX11-NEXT: s_and_b32 s14, s54, 0xff
-; GFX11-NEXT: s_lshl_b32 s15, s53, 8
-; GFX11-NEXT: s_or_b32 s12, s12, s13
-; GFX11-NEXT: s_or_b32 s13, s14, s15
+; GFX11-NEXT: v_readlane_b32 s13, v19, 22
+; GFX11-NEXT: v_readlane_b32 s14, v19, 21
+; GFX11-NEXT: s_lshl_b32 s15, s15, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_lshl_b32 s13, s13, 8
+; GFX11-NEXT: s_and_b32 s14, s14, 0xff
+; GFX11-NEXT: s_or_b32 s12, s12, s13
+; GFX11-NEXT: s_or_b32 s13, s14, s15
; GFX11-NEXT: s_and_b32 s12, s12, 0xffff
; GFX11-NEXT: s_lshl_b32 s13, s13, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s12, s13
-; GFX11-NEXT: v_readlane_b32 s12, v19, 8
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:32
-; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
; GFX11-NEXT: v_dual_mov_b32 v11, s2 :: v_dual_mov_b32 v12, s3
+; GFX11-NEXT: v_readlane_b32 s1, v19, 19
+; GFX11-NEXT: v_readlane_b32 s2, v19, 18
; GFX11-NEXT: s_and_b32 s0, s10, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s52, 8
-; GFX11-NEXT: s_and_b32 s2, s51, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s12, 8
+; GFX11-NEXT: s_lshl_b32 s3, s56, 8
+; GFX11-NEXT: v_readlane_b32 s10, v19, 16
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s11, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s50, 8
-; GFX11-NEXT: s_and_b32 s10, s49, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s48, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s10, s11
-; GFX11-NEXT: v_readlane_b32 s10, v19, 10
+; GFX11-NEXT: v_readlane_b32 s3, v19, 17
+; GFX11-NEXT: v_readlane_b32 s11, v19, 15
+; GFX11-NEXT: s_and_b32 s10, s10, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s11, s11, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_or_b32 s3, s10, s11
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s8, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s39, 8
-; GFX11-NEXT: s_and_b32 s8, s38, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 8
-; GFX11-NEXT: v_readlane_b32 s11, v19, 11
+; GFX11-NEXT: v_readlane_b32 s3, v19, 14
+; GFX11-NEXT: v_readlane_b32 s8, v19, 13
+; GFX11-NEXT: s_lshl_b32 s10, s46, 8
+; GFX11-NEXT: v_readlane_b32 s11, v19, 10
+; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s8, s8, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s8, s10
; GFX11-NEXT: s_and_b32 s8, s9, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s37, 8
-; GFX11-NEXT: s_and_b32 s10, s36, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s35, 8
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: v_readlane_b32 s9, v19, 12
+; GFX11-NEXT: v_readlane_b32 s10, v19, 11
+; GFX11-NEXT: s_lshl_b32 s11, s11, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_lshl_b32 s9, s9, 8
+; GFX11-NEXT: s_and_b32 s10, s10, 0xff
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s8, s9
-; GFX11-NEXT: v_readlane_b32 s8, v19, 12
-; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-NEXT: v_readlane_b32 s1, v19, 9
+; GFX11-NEXT: v_readlane_b32 s2, v19, 8
; GFX11-NEXT: s_and_b32 s0, s6, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s56, 8
-; GFX11-NEXT: s_and_b32 s2, s57, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s8, 8
+; GFX11-NEXT: s_lshl_b32 s3, s44, 8
+; GFX11-NEXT: v_readlane_b32 s6, v19, 6
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s7, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s34, 8
-; GFX11-NEXT: s_and_b32 s6, vcc_hi, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s46, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s6, s7
-; GFX11-NEXT: v_readlane_b32 s6, v19, 14
+; GFX11-NEXT: v_readlane_b32 s3, v19, 7
+; GFX11-NEXT: v_readlane_b32 s7, v19, 5
+; GFX11-NEXT: s_and_b32 s6, s6, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s7, s7, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_or_b32 s3, s6, s7
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s4, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s47, 8
-; GFX11-NEXT: s_and_b32 s4, s104, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 8
-; GFX11-NEXT: v_readlane_b32 s7, v19, 15
+; GFX11-NEXT: v_readlane_b32 s3, v19, 4
+; GFX11-NEXT: v_readlane_b32 s4, v19, 3
+; GFX11-NEXT: s_lshl_b32 s6, s42, 8
+; GFX11-NEXT: v_readlane_b32 s7, v19, 0
+; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:64
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s4, s4, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s6
; GFX11-NEXT: s_and_b32 s4, s5, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s103, 8
-; GFX11-NEXT: s_and_b32 s6, s102, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s101, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: v_readlane_b32 s5, v19, 2
+; GFX11-NEXT: v_readlane_b32 s6, v19, 1
+; GFX11-NEXT: s_lshl_b32 s7, s7, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_lshl_b32 s5, s5, 8
+; GFX11-NEXT: s_and_b32 s6, s6, 0xff
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:64
; GFX11-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v6, s1
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
-; GFX11-NEXT: v_readlane_b32 s17, v19, 5
-; GFX11-NEXT: v_readlane_b32 s13, v19, 9
-; GFX11-NEXT: v_readlane_b32 s9, v19, 13
; GFX11-NEXT: s_clause 0x2
; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:96
@@ -11340,8 +11242,13 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s103, v17, 7
; GFX11-NEXT: v_readlane_b32 s102, v17, 6
; GFX11-NEXT: v_readlane_b32 s101, v17, 5
+; GFX11-NEXT: v_readlane_b32 s100, v17, 4
+; GFX11-NEXT: v_readlane_b32 s99, v17, 3
+; GFX11-NEXT: v_readlane_b32 s98, v17, 2
+; GFX11-NEXT: v_readlane_b32 s97, v17, 1
; GFX11-NEXT: v_readlane_b32 s96, v17, 0
; GFX11-NEXT: v_readlane_b32 s87, v16, 31
+; GFX11-NEXT: v_readlane_b32 s86, v16, 30
; GFX11-NEXT: v_readlane_b32 s85, v16, 29
; GFX11-NEXT: v_readlane_b32 s84, v16, 28
; GFX11-NEXT: v_readlane_b32 s83, v16, 27
@@ -11350,6 +11257,7 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s80, v16, 24
; GFX11-NEXT: v_readlane_b32 s71, v16, 23
; GFX11-NEXT: v_readlane_b32 s70, v16, 22
+; GFX11-NEXT: v_readlane_b32 s69, v16, 21
; GFX11-NEXT: v_readlane_b32 s68, v16, 20
; GFX11-NEXT: v_readlane_b32 s67, v16, 19
; GFX11-NEXT: v_readlane_b32 s66, v16, 18
@@ -11369,6 +11277,8 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s36, v16, 4
; GFX11-NEXT: v_readlane_b32 s35, v16, 3
; GFX11-NEXT: v_readlane_b32 s34, v16, 2
+; GFX11-NEXT: v_readlane_b32 s31, v16, 1
+; GFX11-NEXT: v_readlane_b32 s30, v16, 0
; GFX11-NEXT: s_xor_saveexec_b32 s0, -1
; GFX11-NEXT: s_clause 0x3
; GFX11-NEXT: scratch_load_b32 v16, off, s32
@@ -11378,6 +11288,146 @@ define inreg <128 x i8> @bitcast_v32i32_to_v128i8_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: s_mov_b32 exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-NEXT: .LBB13_4:
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr104
+; GFX11-NEXT: ; implicit-def: $sgpr103
+; GFX11-NEXT: ; implicit-def: $sgpr30
+; GFX11-NEXT: ; implicit-def: $sgpr102
+; GFX11-NEXT: ; implicit-def: $sgpr101
+; GFX11-NEXT: ; implicit-def: $sgpr100
+; GFX11-NEXT: ; implicit-def: $sgpr99
+; GFX11-NEXT: ; implicit-def: $sgpr98
+; GFX11-NEXT: ; implicit-def: $sgpr94
+; GFX11-NEXT: ; implicit-def: $sgpr97
+; GFX11-NEXT: ; implicit-def: $sgpr96
+; GFX11-NEXT: ; implicit-def: $sgpr87
+; GFX11-NEXT: ; implicit-def: $sgpr86
+; GFX11-NEXT: ; implicit-def: $sgpr85
+; GFX11-NEXT: ; implicit-def: $sgpr92
+; GFX11-NEXT: ; implicit-def: $sgpr84
+; GFX11-NEXT: ; implicit-def: $sgpr83
+; GFX11-NEXT: ; implicit-def: $sgpr82
+; GFX11-NEXT: ; implicit-def: $sgpr81
+; GFX11-NEXT: ; implicit-def: $sgpr80
+; GFX11-NEXT: ; implicit-def: $sgpr90
+; GFX11-NEXT: ; implicit-def: $sgpr71
+; GFX11-NEXT: ; implicit-def: $sgpr70
+; GFX11-NEXT: ; implicit-def: $sgpr69
+; GFX11-NEXT: ; implicit-def: $sgpr68
+; GFX11-NEXT: ; implicit-def: $sgpr67
+; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr66
+; GFX11-NEXT: ; implicit-def: $sgpr65
+; GFX11-NEXT: ; implicit-def: $sgpr64
+; GFX11-NEXT: ; implicit-def: $sgpr55
+; GFX11-NEXT: ; implicit-def: $sgpr54
+; GFX11-NEXT: ; implicit-def: $sgpr74
+; GFX11-NEXT: ; implicit-def: $sgpr53
+; GFX11-NEXT: ; implicit-def: $sgpr52
+; GFX11-NEXT: ; implicit-def: $sgpr51
+; GFX11-NEXT: ; implicit-def: $sgpr50
+; GFX11-NEXT: ; implicit-def: $sgpr49
+; GFX11-NEXT: ; implicit-def: $sgpr48
+; GFX11-NEXT: ; implicit-def: $sgpr39
+; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr37
+; GFX11-NEXT: ; implicit-def: $sgpr36
+; GFX11-NEXT: ; implicit-def: $sgpr35
+; GFX11-NEXT: ; implicit-def: $sgpr34
+; GFX11-NEXT: ; implicit-def: $vcc_hi
+; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr78
+; GFX11-NEXT: ; implicit-def: $sgpr72
+; GFX11-NEXT: ; implicit-def: $sgpr62
+; GFX11-NEXT: ; implicit-def: $sgpr60
+; GFX11-NEXT: ; implicit-def: $sgpr58
+; GFX11-NEXT: ; implicit-def: $sgpr56
+; GFX11-NEXT: ; implicit-def: $sgpr46
+; GFX11-NEXT: ; implicit-def: $sgpr44
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17446,8 +17496,17 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32
@@ -17456,15 +17515,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:72
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:80
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:88
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
@@ -17474,113 +17533,92 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3
-; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5
-; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v3
+; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5
+; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v7
+; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v9
+; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v11
+; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v13
+; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v15
+; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v17
+; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v19
+; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v21
+; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v23
+; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v25
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v27
+; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v53
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v49
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192
@@ -17590,31 +17628,31 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256
@@ -17626,140 +17664,157 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:324
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:308
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9
-; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:124
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:300
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:292
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v3
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:284
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:276
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:268
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:260
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:252
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:244
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:236
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:228
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:220
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:204
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:196
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:188
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:164
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:156
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:148
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:140
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:124
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:116
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB15_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v2
-; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
-; SI-NEXT: v_or_b32_e32 v0, v0, v60
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
+; SI-NEXT: v_or_b32_e32 v0, v0, v16
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v30, v1
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_mov_b32_e32 v30, v5
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v4, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
+; SI-NEXT: v_or_b32_e32 v0, v0, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
+; SI-NEXT: v_or_b32_e32 v2, v2, v28
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_or_b32_e32 v3, v20, v3
+; SI-NEXT: v_or_b32_e32 v5, v2, v3
+; SI-NEXT: v_mov_b32_e32 v2, v9
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: s_or_b32 s4, s4, s5
@@ -17768,306 +17823,310 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_lshl_b32 s8, s27, 24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: v_or_b32_e32 v4, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT: v_or_b32_e32 v5, v2, v3
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
-; SI-NEXT: v_mov_b32_e32 v3, v7
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v12
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v57, v1
; SI-NEXT: v_or_b32_e32 v6, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v14
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v16
-; SI-NEXT: v_or_b32_e32 v0, v0, v15
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v24
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v7, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v20
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v26
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v17, v1
; SI-NEXT: v_or_b32_e32 v8, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v22
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v24
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v19
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_mov_b32_e32 v2, v9
+; SI-NEXT: v_or_b32_e32 v1, v15, v1
; SI-NEXT: v_or_b32_e32 v9, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v26
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v28
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v10, v1
+; SI-NEXT: v_or_b32_e32 v1, v27, v1
; SI-NEXT: v_or_b32_e32 v10, v0, v1
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v11
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v39
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v23
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v12, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v12, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v23
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v13
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v13, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v13, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v14, v1
-; SI-NEXT: v_or_b32_e32 v14, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v27
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v14, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v62
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v48
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v15, v1
-; SI-NEXT: v_or_b32_e32 v15, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
-; SI-NEXT: v_mov_b32_e32 v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v15, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v42, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v21
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v16, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v16, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v46, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v17, v1
-; SI-NEXT: v_or_b32_e32 v17, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v51
-; SI-NEXT: v_mov_b32_e32 v55, v22
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v51, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v17, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v22
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v18, v1
-; SI-NEXT: v_or_b32_e32 v18, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v50
-; SI-NEXT: v_mov_b32_e32 v44, v23
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v50, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v18, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v53, v3
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v29
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v63
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v19, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v19, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v40, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v30
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v20, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v20, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v34, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v51
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v21, v1
-; SI-NEXT: v_or_b32_e32 v21, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v59
-; SI-NEXT: v_mov_b32_e32 v59, v24
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v21, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v33
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v39
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v22, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v22, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v39, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v59
+; SI-NEXT: v_or_b32_e32 v0, v0, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v23, v1
+; SI-NEXT: v_or_b32_e32 v1, v39, v1
; SI-NEXT: v_or_b32_e32 v23, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v37, v56
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v33, v3
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v24, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v24, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v25, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v25, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v45
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v35, v39
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v63, v1
-; SI-NEXT: v_or_b32_e32 v26, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v26, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v63
+; SI-NEXT: v_mov_b32_e32 v41, v62
+; SI-NEXT: v_mov_b32_e32 v63, v56
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v46
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_or_b32_e32 v27, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v38
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v27, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v28, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v62, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v38, v3
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v61
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v29, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
-; SI-NEXT: v_or_b32_e32 v0, v0, v30
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v61, v54
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v30, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
-; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v57, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v31, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v40
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: v_mov_b32_e32 v38, v63
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -18094,61 +18153,64 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_and_b32 s6, s6, 0xffff
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: v_mov_b32_e32 v57, v1
+; SI-NEXT: v_mov_b32_e32 v48, v1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB15_3
; SI-NEXT: .LBB15_2:
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: v_mov_b32_e32 v37, v56
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_mov_b32_e32 v45, v33
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; SI-NEXT: .LBB15_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v63, v46
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v47, v44
; SI-NEXT: s_cbranch_vccnz .LBB15_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s28, s28, 3
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v0, s4, v0
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -18195,7 +18257,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18204,17 +18266,17 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18224,15 +18286,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18242,15 +18304,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18260,15 +18322,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18278,15 +18340,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18296,15 +18358,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18314,15 +18376,15 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18331,34 +18393,66 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v54, v1
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18367,16 +18461,16 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
+; SI-NEXT: v_or_b32_e32 v0, v42, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -18384,16 +18478,16 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -18401,33 +18495,33 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v43, v1
+; SI-NEXT: v_or_b32_e32 v1, v53, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18436,16 +18530,16 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -18453,16 +18547,16 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v34, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -18470,173 +18564,147 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v41, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v35, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v59, v1
+; SI-NEXT: v_or_b32_e32 v1, v33, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_or_b32_e32 v0, v42, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v45
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
-; SI-NEXT: v_or_b32_e32 v0, v32, v0
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v38, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36
-; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -18666,7 +18734,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v128i8_to_v32i32_scalar:
@@ -18688,21 +18756,21 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:332
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
@@ -18717,7 +18785,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
-; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -18726,76 +18794,80 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v23
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3
-; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5
-; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9
-; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11
-; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v25
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v27
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v3
+; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v5
+; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v29
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v45
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v44
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v43
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v42
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v41
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v40
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v55
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v54
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v53
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v52
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v51
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v50
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v49
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v48
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v39
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v30
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v31
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v32
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v33
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v34
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v35
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v36
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v37
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:184
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
@@ -18803,30 +18875,30 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37
-; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v38
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v15
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v1
; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v13
; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v9
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
@@ -18835,130 +18907,127 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v12, 8, v15
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1
+; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:312
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
-; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
-; VI-NEXT: s_waitcnt vmcnt(11)
-; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:292
+; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v11
+; VI-NEXT: s_waitcnt vmcnt(10)
+; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52
-; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116
-; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:188
; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:276
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308
-; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:116
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(12)
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v4, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -18967,208 +19036,207 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v3, v7
+; VI-NEXT: v_or_b32_sdwa v3, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v3, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v29, v9
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v50, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v59, v0
-; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v56, v0
-; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v61, v0
+; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v37, v1
+; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v38, v1
-; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v63, v1
+; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v59, v45
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v36, v0
-; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v35, v1
-; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v1
+; VI-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v33, v0
-; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v58, v0
+; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v51, v3
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v42, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v22, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v62, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v23, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v34, v26
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v49, v1
-; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v33, v1
+; VI-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v54, v0
-; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v46, v61
+; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v45, v32
; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v54, v0
+; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v57, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v47, v45
+; VI-NEXT: v_or_b32_sdwa v0, v41, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v43, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v48, v0
-; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v39, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v56, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v55, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v56, v60
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v1, v53, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v49, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v48, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v57, v0
-; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v48, v2
+; VI-NEXT: v_mov_b32_e32 v53, v55
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: v_mov_b32_e32 v43, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v42, v0
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -19199,52 +19267,49 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB15_3
; VI-NEXT: .LBB15_2:
-; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_mov_b32_e32 v46, v61
-; VI-NEXT: v_mov_b32_e32 v47, v45
-; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v34, v26
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: v_mov_b32_e32 v51, v7
-; VI-NEXT: v_mov_b32_e32 v48, v29
-; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
+; VI-NEXT: v_mov_b32_e32 v59, v45
+; VI-NEXT: v_mov_b32_e32 v45, v32
+; VI-NEXT: v_mov_b32_e32 v56, v60
; VI-NEXT: .LBB15_3: ; %Flow
; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v44, v47
-; VI-NEXT: v_mov_b32_e32 v47, v46
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_mov_b32_e32 v46, v49
+; VI-NEXT: v_mov_b32_e32 v32, v59
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_mov_b32_e32 v59, v33
; VI-NEXT: s_cbranch_vccnz .LBB15_5
; VI-NEXT: ; %bb.4: ; %cmp.true
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s5, s4
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_and_b32 s4, s4, 0xffff
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: s_add_i32 s16, s16, 3
@@ -19290,17 +19355,17 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
@@ -19313,327 +19378,332 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v46
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v45
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v57
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63
-; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v40
+; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v55
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v56
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
+; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: .LBB15_5: ; %end
; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -19673,28 +19743,37 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:64
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -19704,270 +19783,294 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX9-NEXT: s_waitcnt vmcnt(35)
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_waitcnt vmcnt(28)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v21
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v23
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v25
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v29
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v41
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v31
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v35
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v51
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248
+; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v7
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v5
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:324
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:292
+; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13
; GFX9-NEXT: s_waitcnt vmcnt(14)
-; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7
-; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX9-NEXT: s_waitcnt vmcnt(13)
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148
+; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:140
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:132
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:116
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:100
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148
-; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_waitcnt vmcnt(42)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(40)
+; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(36)
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(42)
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v38, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
-; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
@@ -19975,202 +20078,199 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_or_b32_sdwa v2, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v15, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v43, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v38
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v58, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v54, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_mov_b32_e32 v52, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v50, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v53, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v46, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v37, v57
-; GFX9-NEXT: v_mov_b32_e32 v57, v60
-; GFX9-NEXT: v_mov_b32_e32 v52, v56
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v46, v61
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_mov_b32_e32 v34, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v32, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v56, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v34, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v49, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v53, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v42, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v59, v39
+; GFX9-NEXT: v_mov_b32_e32 v39, v41
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_mov_b32_e32 v56, v55
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v61, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_or_b32 s4, s4, s5
@@ -20201,32 +20301,39 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB15_3
; GFX9-NEXT: .LBB15_2:
-; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v0
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
-; GFX9-NEXT: v_mov_b32_e32 v53, v3
-; GFX9-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-NEXT: v_mov_b32_e32 v57, v38
+; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: .LBB15_3: ; %Flow
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_mov_b32_e32 v41, v52
; GFX9-NEXT: s_cbranch_vccnz .LBB15_5
; GFX9-NEXT: ; %bb.4: ; %cmp.true
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v61
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -20270,190 +20377,210 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_and_b32 s8, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s9, s29, 8
; GFX9-NEXT: s_or_b32 s8, s9, s8
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v56
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s8, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s8, v0
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v52, v54
+; GFX9-NEXT: v_mov_b32_e32 v55, v57
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v49
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
@@ -20463,163 +20590,155 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v46
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v48
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
-; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v37
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v49
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v40
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v43
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 3, v36
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v41
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
-; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
+; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v54
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v0, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: .LBB15_5: ; %end
; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -20791,7 +20910,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -20862,24 +20981,24 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -21121,40 +21240,40 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -21163,9 +21282,8 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -21579,7 +21697,9 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-TRUE16-NEXT: s_branch .LBB15_3
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -21732,7 +21852,7 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -21803,24 +21923,24 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -22062,40 +22182,40 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -22104,9 +22224,8 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -22520,7 +22639,9 @@ define inreg <32 x i32> @bitcast_v128i8_to_v32i32_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-FAKE16-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23482,8 +23603,9 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; SI-NEXT: v_writelane_b32 v20, s87, 31
; SI-NEXT: v_writelane_b32 v20, s96, 32
; SI-NEXT: v_writelane_b32 v20, s97, 33
-; SI-NEXT: v_writelane_b32 v20, s98, 34
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v20, s98, 34
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v20, s99, 35
; SI-NEXT: v_readfirstlane_b32 s70, v1
; SI-NEXT: v_readfirstlane_b32 s71, v2
@@ -23502,8 +23624,8 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v15
; SI-NEXT: v_readfirstlane_b32 s7, v16
; SI-NEXT: v_readfirstlane_b32 s8, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s9, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane
; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -23579,8 +23701,8 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB17_3
; SI-NEXT: .LBB17_2:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
; SI-NEXT: ; implicit-def: $sgpr68
; SI-NEXT: ; implicit-def: $sgpr69
; SI-NEXT: ; implicit-def: $sgpr66
@@ -23639,15 +23761,14 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; SI-NEXT: ; implicit-def: $sgpr15
; SI-NEXT: ; implicit-def: $sgpr12
; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
+; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: .LBB17_3: ; %Flow
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_mov_b32 s4, s10
@@ -24052,6 +24173,7 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -24072,7 +24194,7 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -24085,10 +24207,13 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -24121,16 +24246,15 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; VI-NEXT: v_add_u32_e32 v32, vcc, 3, v32
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v32i32_to_v64bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -24151,7 +24275,7 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -24164,10 +24288,13 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -24200,44 +24327,42 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX9-NEXT: v_add_u32_e32 v32, 3, v32
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v32i32_to_v64bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB17_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: .LBB17_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
@@ -24270,6 +24395,7 @@ define inreg <64 x bfloat> @bitcast_v32i32_to_v64bf16_scalar(<32 x i32> inreg %a
; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27259,23 +27385,28 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v7
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6
-; SI-NEXT: v_mov_b32_e32 v39, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8
; SI-NEXT: v_mov_b32_e32 v38, v12
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v38
@@ -27289,14 +27420,11 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30
; SI-NEXT: v_mov_b32_e32 v37, v14
-; SI-NEXT: v_mov_b32_e32 v14, v11
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9
-; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; SI-NEXT: v_mul_f32_e32 v14, 1.0, v11
; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13
; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37
; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17
@@ -27315,7 +27443,9 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19
+; SI-NEXT: v_mul_f32_e64 v10, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23
+; SI-NEXT: v_mul_f32_e64 v11, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27
@@ -27324,8 +27454,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43
; SI-NEXT: v_mul_f32_e32 v52, 1.0, v44
; SI-NEXT: v_mul_f32_e32 v24, 1.0, v45
@@ -27341,77 +27471,76 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63
; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17
-; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18
-; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21
-; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20
-; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e64 v33, 1.0, s21
+; SI-NEXT: v_mul_f32_e64 v35, 1.0, s20
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16
-; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v59, v2
; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36
-; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33
+; SI-NEXT: v_alignbit_b32 v2, v2, v35, 16
+; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_alignbit_b32 v1, v1, v10, 16
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_alignbit_b32 v3, v3, v11, 16
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
@@ -27452,30 +27581,35 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
; SI-NEXT: v_mov_b32_e32 v35, v7
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_mov_b32_e32 v43, v8
; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v9
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_mov_b32_e32 v60, v9
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_mov_b32_e32 v58, v10
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v56, v11
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32
; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v8
+; SI-NEXT: v_mov_b32_e32 v42, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16
-; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v11
-; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v9
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; SI-NEXT: v_alignbit_b32 v9, v9, v10, 16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v56, v11
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
@@ -27489,7 +27623,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v44, v14
+; SI-NEXT: v_mov_b32_e32 v33, v14
; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -27510,25 +27644,25 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16
; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59
; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60
+; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v42
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v57
@@ -27540,28 +27674,28 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36
; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
@@ -27572,8 +27706,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16
; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
@@ -27652,22 +27786,22 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
@@ -27675,7 +27809,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43
; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16
-; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42
+; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v60
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58
@@ -27690,7 +27824,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62
@@ -27722,7 +27856,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
; SI-NEXT: v_alignbit_b32 v22, v23, v22, 16
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23
@@ -27742,12 +27876,12 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48
; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30
@@ -27776,25 +27910,24 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v61, v53
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v59, v2
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
; SI-NEXT: v_mov_b32_e32 v45, v12
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_mov_b32_e32 v38, v39
; SI-NEXT: v_mov_b32_e32 v39, v41
@@ -27808,12 +27941,15 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mov_b32_e32 v48, v37
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v64bf16_to_v32i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -27834,7 +27970,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -27847,10 +27983,13 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_3
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v15
; VI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; VI-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -28427,16 +28566,15 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_cndmask_b32_e32 v16, v33, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; VI-NEXT: v_alignbit_b32 v16, v16, v18, 16
-; VI-NEXT: .LBB19_3: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-LABEL: bitcast_v64bf16_to_v32i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -28457,7 +28595,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -28470,10 +28608,13 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_3
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v15
; GFX9-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -29083,11 +29224,9 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_and_b32_sdwa v16, v18, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v18, 16, v16
-; GFX9-NEXT: .LBB19_3: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_4:
-; GFX9-NEXT: s_branch .LBB19_2
;
; GFX11-LABEL: bitcast_v64bf16_to_v32i32_scalar:
; GFX11: ; %bb.0:
@@ -29177,8 +29316,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
@@ -29189,8 +29328,7 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-NEXT: .LBB19_2: ; %cmp.true
; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s4, s27, 16
@@ -29937,8 +30075,8 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
@@ -29952,7 +30090,9 @@ define inreg <32 x i32> @bitcast_v64bf16_to_v32i32_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB19_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-NEXT: s_branch .LBB19_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30952,8 +31092,9 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v63, s30, 0
; SI-NEXT: v_writelane_b32 v63, s31, 1
-; SI-NEXT: v_writelane_b32 v63, s34, 2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v63, s34, 2
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v63, s35, 3
; SI-NEXT: v_readfirstlane_b32 s47, v1
; SI-NEXT: v_readfirstlane_b32 s46, v2
@@ -30969,11 +31110,11 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s12, v12
; SI-NEXT: v_readfirstlane_b32 s11, v13
; SI-NEXT: v_readfirstlane_b32 s10, v14
-; SI-NEXT: v_readfirstlane_b32 s8, v15
-; SI-NEXT: v_readfirstlane_b32 s7, v16
-; SI-NEXT: v_readfirstlane_b32 s6, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v18
+; SI-NEXT: v_readfirstlane_b32 s9, v15
+; SI-NEXT: v_readfirstlane_b32 s8, v16
+; SI-NEXT: v_readfirstlane_b32 s7, v17
+; SI-NEXT: v_readfirstlane_b32 s6, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -30991,17 +31132,17 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
+; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
@@ -31060,8 +31201,8 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f32_f16_e32 v61, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v14, s11
; SI-NEXT: v_cvt_f32_f16_e32 v16, s12
@@ -31093,11 +31234,11 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v2, s16
; SI-NEXT: s_cbranch_execnz .LBB21_3
; SI-NEXT: .LBB21_2: ; %cmp.true
-; SI-NEXT: s_add_i32 s9, s9, 3
-; SI-NEXT: s_lshr_b32 s35, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s35
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_lshr_b32 s34, s6, 16
+; SI-NEXT: s_lshr_b32 s35, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s35
+; SI-NEXT: s_add_i32 s7, s7, 3
+; SI-NEXT: s_lshr_b32 s34, s7, 16
; SI-NEXT: s_add_i32 s16, s16, 3
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -31129,8 +31270,8 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s12, s12, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s10, s10, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
-; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s56, s18, 16
@@ -31159,12 +31300,12 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s95, s12, 16
; SI-NEXT: s_lshr_b32 vcc_lo, s11, 16
; SI-NEXT: s_lshr_b32 vcc_hi, s10, 16
-; SI-NEXT: s_lshr_b32 s30, s8, 16
-; SI-NEXT: s_lshr_b32 s31, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: s_lshr_b32 s30, s9, 16
+; SI-NEXT: s_lshr_b32 s31, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v14, s11
; SI-NEXT: v_cvt_f32_f16_e32 v16, s12
@@ -31480,7 +31621,6 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: .LBB21_4:
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr61
; SI-NEXT: ; implicit-def: $vgpr1
@@ -31542,14 +31682,18 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v32i32_to_v64f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -31570,7 +31714,7 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -31583,10 +31727,13 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -31619,16 +31766,15 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; VI-NEXT: v_add_u32_e32 v32, vcc, 3, v32
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v32i32_to_v64f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -31649,7 +31795,7 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -31662,10 +31808,13 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -31698,44 +31847,42 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX9-NEXT: v_add_u32_e32 v32, 3, v32
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v32i32_to_v64f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB21_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: .LBB21_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
@@ -31768,6 +31915,7 @@ define inreg <64 x half> @bitcast_v32i32_to_v64f16_scalar(<32 x i32> inreg %a, i
; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32820,22 +32968,23 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v53, v26
-; SI-NEXT: v_mov_b32_e32 v45, v6
+; SI-NEXT: v_mov_b32_e32 v52, v30
+; SI-NEXT: v_mov_b32_e32 v54, v26
+; SI-NEXT: v_mov_b32_e32 v41, v6
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt expcnt(0)
@@ -32845,12 +32994,12 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68
-; SI-NEXT: v_mov_b32_e32 v54, v14
+; SI-NEXT: v_mov_b32_e32 v53, v14
; SI-NEXT: v_mov_b32_e32 v55, v12
-; SI-NEXT: v_mov_b32_e32 v41, v11
+; SI-NEXT: v_mov_b32_e32 v43, v11
; SI-NEXT: v_mov_b32_e32 v40, v10
-; SI-NEXT: v_mov_b32_e32 v44, v9
-; SI-NEXT: v_mov_b32_e32 v43, v8
+; SI-NEXT: v_mov_b32_e32 v45, v9
+; SI-NEXT: v_mov_b32_e32 v44, v8
; SI-NEXT: v_cvt_f16_f32_e32 v9, v1
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cvt_f16_f32_e32 v11, v3
@@ -32858,27 +33007,27 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v12, v5
; SI-NEXT: v_cvt_f16_f32_e32 v14, v4
; SI-NEXT: v_cvt_f16_f32_e32 v58, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v41, v41
; SI-NEXT: v_cvt_f16_f32_e32 v56, v45
; SI-NEXT: v_cvt_f16_f32_e32 v46, v44
; SI-NEXT: v_cvt_f16_f32_e32 v44, v43
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v41
-; SI-NEXT: v_cvt_f16_f32_e32 v59, v40
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v40
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v54
+; SI-NEXT: v_cvt_f16_f32_e32 v59, v55
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v53
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v41, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v43, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v40, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v40, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v54
; SI-NEXT: v_cvt_f16_f32_e32 v21, v29
; SI-NEXT: v_cvt_f16_f32_e32 v22, v28
; SI-NEXT: v_cvt_f16_f32_e32 v0, s17
@@ -32890,26 +33039,26 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v51
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v50
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v48
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v52
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v50
; SI-NEXT: v_cvt_f16_f32_e32 v24, v38
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v48
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f16_f32_e32 v25, v39
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v30
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f16_f32_e32 v26, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f16_f32_e32 v39, v6
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f16_f32_e32 v27, v42
+; SI-NEXT: v_cvt_f16_f32_e32 v27, v31
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f16_f32_e32 v38, v60
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v42
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v37, v62
; SI-NEXT: s_waitcnt vmcnt(5)
@@ -32919,70 +33068,74 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f16_f32_e32 v30, v33
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v34
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v35
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v35
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT: v_cvt_f16_f32_e32 v63, s16
-; SI-NEXT: v_cvt_f16_f32_e32 v62, s18
-; SI-NEXT: v_cvt_f16_f32_e32 v60, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v42, s22
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
-; SI-NEXT: v_cvt_f16_f32_e32 v33, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v36
+; SI-NEXT: v_cvt_f16_f32_e32 v62, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v60, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v42, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v36, s22
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(6)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_or_b32_e32 v3, v36, v3
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_or_b32_e32 v20, v54, v20
+; SI-NEXT: v_mov_b32_e32 v54, v21
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v52
-; SI-NEXT: v_or_b32_e32 v5, v33, v5
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
; SI-NEXT: v_or_b32_e32 v22, v51, v22
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
@@ -33004,11 +33157,9 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v27, v38, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v7, v8, v7
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v28, v37, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
@@ -33016,70 +33167,68 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v4, v35, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: v_or_b32_e32 v9, v14, v9
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v58
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v61
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v44
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v47
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_or_b32_e32 v19, v54, v19
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v29, v31, v29
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v34
-; SI-NEXT: v_or_b32_e32 v0, v63, v0
-; SI-NEXT: v_or_b32_e32 v1, v62, v1
-; SI-NEXT: v_or_b32_e32 v2, v60, v2
-; SI-NEXT: v_or_b32_e32 v3, v42, v3
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_mov_b32_e32 v63, v44
-; SI-NEXT: v_or_b32_e32 v11, v44, v11
+; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v63
+; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: v_or_b32_e32 v1, v60, v1
+; SI-NEXT: v_or_b32_e32 v2, v42, v2
+; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v5, v32, v5
+; SI-NEXT: v_or_b32_e32 v10, v41, v10
+; SI-NEXT: v_or_b32_e32 v11, v46, v11
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
+; SI-NEXT: v_or_b32_e32 v12, v61, v12
; SI-NEXT: v_mov_b32_e32 v60, v59
-; SI-NEXT: v_or_b32_e32 v12, v59, v12
+; SI-NEXT: v_or_b32_e32 v13, v59, v13
; SI-NEXT: v_mov_b32_e32 v58, v57
-; SI-NEXT: v_or_b32_e32 v13, v57, v13
; SI-NEXT: v_mov_b32_e32 v56, v47
+; SI-NEXT: v_or_b32_e32 v14, v47, v14
; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_or_b32_e32 v14, v45, v14
+; SI-NEXT: v_or_b32_e32 v15, v45, v15
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v15, v43, v15
-; SI-NEXT: v_mov_b32_e32 v42, v41
-; SI-NEXT: v_or_b32_e32 v16, v41, v16
-; SI-NEXT: v_or_b32_e32 v17, v40, v17
+; SI-NEXT: v_or_b32_e32 v16, v43, v16
+; SI-NEXT: v_mov_b32_e32 v42, v53
+; SI-NEXT: v_or_b32_e32 v17, v53, v17
+; SI-NEXT: v_or_b32_e32 v18, v40, v18
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_or_b32_e32 v18, v55, v18
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v30, v32, v30
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_or_b32_e32 v31, v36, v31
+; SI-NEXT: v_or_b32_e32 v19, v55, v19
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_or_b32_e32 v30, v33, v30
+; SI-NEXT: v_or_b32_e32 v31, v35, v31
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB23_3
; SI-NEXT: .LBB23_2:
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v63, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
; SI-NEXT: v_mov_b32_e32 v60, v59
; SI-NEXT: v_mov_b32_e32 v58, v57
; SI-NEXT: v_mov_b32_e32 v56, v47
; SI-NEXT: v_mov_b32_e32 v46, v45
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v42, v41
+; SI-NEXT: v_mov_b32_e32 v42, v53
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_mov_b32_e32 v52, v51
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_mov_b32_e32 v54, v21
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_mov_b32_e32 v50, v24
; SI-NEXT: v_mov_b32_e32 v49, v25
@@ -33087,25 +33236,29 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v39, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
-; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB23_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v34, v33
-; SI-NEXT: v_mov_b32_e32 v33, v35
-; SI-NEXT: v_mov_b32_e32 v35, v40
+; SI-NEXT: v_mov_b32_e32 v33, v63
+; SI-NEXT: v_mov_b32_e32 v52, v36
+; SI-NEXT: v_mov_b32_e32 v36, v40
; SI-NEXT: v_mov_b32_e32 v53, v42
+; SI-NEXT: v_mov_b32_e32 v55, v44
; SI-NEXT: v_mov_b32_e32 v40, v46
-; SI-NEXT: v_mov_b32_e32 v41, v56
+; SI-NEXT: v_mov_b32_e32 v57, v56
; SI-NEXT: v_mov_b32_e32 v42, v58
; SI-NEXT: v_mov_b32_e32 v43, v60
+; SI-NEXT: v_mov_b32_e32 v44, v62
+; SI-NEXT: v_mov_b32_e32 v45, v41
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_cbranch_vccnz .LBB23_5
; SI-NEXT: ; %bb.4: ; %cmp.true
@@ -33114,11 +33267,11 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v57
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -33127,10 +33280,10 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v43
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v44
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v57
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -33139,33 +33292,32 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_mov_b32_e32 v55, v44
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v52
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v34
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v52
+; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v32
; SI-NEXT: v_cvt_f32_f16_e32 v26, v49
; SI-NEXT: v_cvt_f32_f16_e32 v29, v38
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v32, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v33, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v32, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v33, v35
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32
@@ -33174,14 +33326,14 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v33, v33
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
@@ -33216,26 +33368,22 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
@@ -33280,72 +33428,70 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v63
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v41
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v17, v17
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
@@ -33353,35 +33499,38 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v54
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v54
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v23
; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
@@ -33394,7 +33543,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v28, v26
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
@@ -33409,9 +33558,9 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_or_b32_e32 v28, v30, v28
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
@@ -33419,16 +33568,14 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v34
+; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
@@ -33456,6 +33603,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -33476,7 +33624,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -33489,10 +33637,13 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v18, 0x200
; VI-NEXT: v_add_f16_sdwa v33, v15, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -33590,16 +33741,15 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v16, 0x200, v16
; VI-NEXT: v_or_b32_e32 v17, v17, v33
; VI-NEXT: v_or_b32_e32 v16, v16, v18
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v64f16_to_v32i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -33620,7 +33770,7 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -33633,10 +33783,13 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -33670,118 +33823,113 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v32, v32, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, v17, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, v16, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v64f16_to_v32i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v30, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v33, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v176, 0x200, v176 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v177, 0x200, v177 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v178, 0x200, v178 op_sel_hi:[0,1]
@@ -33790,119 +33938,117 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v181, 0x200, v181 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v182, 0x200, v182 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v183, 0x200, v183 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v168, 0x200, v168 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v169, 0x200, v169 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v170, 0x200, v170 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v171, 0x200, v171 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v172, 0x200, v172 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v173, 0x200, v173 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v175, 0x200, v175 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v184, 0x200, v184 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v151, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v137, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v124, 0x200, s23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v112, 0x200, s22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v101, 0x200, s21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v91, 0x200, s20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v82, 0x200, s19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v74, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v67, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v61, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v56, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v52, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v49, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v47, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v149, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v135, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v122, 0x200, s23 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v110, 0x200, s22 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v99, 0x200, s21 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v89, 0x200, s20 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v80, 0x200, s19 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v72, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v65, 0x200, s17 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v59, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v54, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v50, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: .LBB23_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -33910,23 +34056,25 @@ define inreg <32 x i32> @bitcast_v64f16_to_v32i32_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB23_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB23_2
+; GFX11-NEXT: s_branch .LBB23_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34509,6 +34657,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s47, v1
; SI-NEXT: v_readfirstlane_b32 s46, v2
; SI-NEXT: v_readfirstlane_b32 s45, v3
@@ -34526,8 +34675,8 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v15
; SI-NEXT: v_readfirstlane_b32 s8, v16
; SI-NEXT: v_readfirstlane_b32 s7, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -34887,12 +35036,15 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr57
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
+; SI-NEXT: s_branch .LBB25_3
;
; VI-LABEL: bitcast_v32i32_to_v64i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -34913,7 +35065,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -34926,10 +35078,13 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -34962,16 +35117,15 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v32, vcc, 3, v32
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v32i32_to_v64i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -34992,7 +35146,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -35005,10 +35159,13 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -35041,44 +35198,42 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v32, 3, v32
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v32i32_to_v64i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB25_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: .LBB25_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v15, 3, v15
; GFX11-NEXT: v_add_nc_u32_e32 v14, 3, v14
; GFX11-NEXT: v_add_nc_u32_e32 v13, 3, v13
@@ -35111,6 +35266,7 @@ define inreg <64 x i16> @bitcast_v32i32_to_v64i16_scalar(<32 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
; GFX11-NEXT: v_add_nc_u32_e32 v16, 3, v16
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35957,43 +36113,43 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v56, v10
-; SI-NEXT: s_waitcnt expcnt(6)
-; SI-NEXT: v_mov_b32_e32 v57, v8
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v60, v8
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:68
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3
-; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v9
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17
-; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v19
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23
+; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v23
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
@@ -36001,7 +36157,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8
@@ -36010,102 +36166,103 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38
-; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v35
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32
-; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v32
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v7, v0, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4
-; SI-NEXT: v_or_b32_e32 v9, v0, v50
+; SI-NEXT: v_or_b32_e32 v9, v0, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; SI-NEXT: v_or_b32_e32 v10, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_or_b32_e32 v10, v0, v50
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_or_b32_e32 v11, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
; SI-NEXT: v_or_b32_e32 v12, v0, v40
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
; SI-NEXT: v_or_b32_e32 v13, v0, v13
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v41, v14
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v60, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v43, v48
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
; SI-NEXT: v_mov_b32_e32 v48, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_or_b32_e32 v16, v0, v37
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_or_b32_e32 v16, v0, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20
; SI-NEXT: v_or_b32_e32 v17, v0, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v22
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_or_b32_e32 v18, v0, v35
+; SI-NEXT: v_or_b32_e32 v18, v0, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24
; SI-NEXT: v_or_b32_e32 v19, v0, v19
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26
-; SI-NEXT: v_mov_b32_e32 v37, v20
+; SI-NEXT: v_mov_b32_e32 v38, v20
; SI-NEXT: v_or_b32_e32 v20, v0, v33
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28
; SI-NEXT: v_or_b32_e32 v21, v0, v21
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30
; SI-NEXT: v_or_b32_e32 v22, v0, v31
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v39, v23
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v56, v23
; SI-NEXT: v_or_b32_e32 v23, v0, v23
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
; SI-NEXT: v_mov_b32_e32 v24, v29
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v24, v0, v24
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_or_b32_e32 v25, v0, v25
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
; SI-NEXT: v_mov_b32_e32 v26, v27
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
@@ -36122,29 +36279,28 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_lshl_b32 s9, s25, 16
; SI-NEXT: v_mov_b32_e32 v33, v28
; SI-NEXT: v_or_b32_e32 v28, v0, v5
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: v_or_b32_e32 v29, v0, v62
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_or_b32_e32 v29, v0, v63
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v30, v0, v3
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41
; SI-NEXT: s_or_b32 s10, s10, s11
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v32, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v34, v55
; SI-NEXT: v_or_b32_e32 v8, v1, v55
; SI-NEXT: v_mov_b32_e32 v55, v4
; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v47, v46
; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: v_or_b32_e32 v31, v0, v34
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_or_b32_e32 v31, v0, v62
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -36152,12 +36308,45 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: v_mov_b32_e32 v6, s10
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_branch .LBB27_3
+; SI-NEXT: .LBB27_2:
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v34, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v55, v4
+; SI-NEXT: v_mov_b32_e32 v53, v6
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v15
+; SI-NEXT: v_mov_b32_e32 v60, v14
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_mov_b32_e32 v45, v44
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v38, v20
+; SI-NEXT: v_mov_b32_e32 v56, v23
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v33, v28
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_mov_b32_e32 v46, v25
+; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; SI-NEXT: .LBB27_3: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v41, v42
+; SI-NEXT: s_cbranch_vccnz .LBB27_5
+; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT: v_or_b32_e32 v1, v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v34, v1
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -36203,7 +36392,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v38, v0
+; SI-NEXT: v_or_b32_e32 v0, v35, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -36211,25 +36400,25 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v36, v0
+; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v43, v0
; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v49, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v43, v0
+; SI-NEXT: v_or_b32_e32 v0, v49, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0
@@ -36240,7 +36429,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -36255,7 +36444,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -36283,31 +36472,31 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_or_b32_e32 v0, v56, v0
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -36321,7 +36510,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -36330,7 +36519,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -36345,7 +36534,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_5: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
@@ -36364,40 +36553,12 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v32, v55
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v55, v4
-; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
-; SI-NEXT: v_mov_b32_e32 v43, v48
-; SI-NEXT: v_mov_b32_e32 v48, v15
-; SI-NEXT: v_mov_b32_e32 v41, v14
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v37, v20
-; SI-NEXT: v_mov_b32_e32 v39, v23
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v33, v28
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v64i16_to_v32i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s6, v2
; VI-NEXT: v_readfirstlane_b32 s7, v3
; VI-NEXT: v_readfirstlane_b32 s8, v4
@@ -36415,12 +36576,15 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s44, v16
; VI-NEXT: v_readfirstlane_b32 s45, v17
; VI-NEXT: v_readfirstlane_b32 s46, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s47, v1
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s47, 3
; VI-NEXT: s_and_b32 s4, s47, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -36581,7 +36745,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s6, s4, 0x30000
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -36615,13 +36779,12 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s44
; VI-NEXT: v_mov_b32_e32 v31, s45
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v64i16_to_v32i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -36642,7 +36805,7 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -36655,10 +36818,13 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -36691,118 +36857,113 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v64i16_to_v32i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v30, s27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v33, s27, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v17, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v176, v176, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v177, v177, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v178, v178, 3 op_sel_hi:[1,0]
@@ -36811,119 +36972,117 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v181, v181, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v182, v182, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v183, v183, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v168, v168, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v169, v169, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v170, v170, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v171, v171, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v172, v172, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v173, v173, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v175, v175, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v184, v184, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v151, s25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v137, s24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v124, s23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v112, s22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v101, s21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v91, s20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v82, s19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v74, s18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v67, s17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v61, s16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v56, s3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v52, s2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v49, s1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v47, s0, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v149, s25, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v135, s24, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v122, s23, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v110, s22, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v99, s21, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v89, s20, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v80, s19, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v72, s18, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v65, s17, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v59, s16, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v54, s3, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v50, s2, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v2, s1, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: .LBB27_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -36931,23 +37090,25 @@ define inreg <32 x i32> @bitcast_v64i16_to_v32i32_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB27_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB27_2
+; GFX11-NEXT: s_branch .LBB27_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37173,6 +37334,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -37193,7 +37355,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -37206,10 +37368,13 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB29_4
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_3
-; SI-NEXT: .LBB29_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB29_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB29_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v31, 1.0, v31
; SI-NEXT: v_add_f32_e32 v30, 1.0, v30
; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -37242,16 +37407,15 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB29_3: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB29_4:
-; SI-NEXT: s_branch .LBB29_2
;
; VI-LABEL: bitcast_v32f32_to_v16i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -37272,7 +37436,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -37285,10 +37449,13 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB29_4
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_3
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v31, 1.0, v31
; VI-NEXT: v_add_f32_e32 v30, 1.0, v30
; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -37321,16 +37488,15 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB29_3: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_4:
-; VI-NEXT: s_branch .LBB29_2
;
; GFX9-LABEL: bitcast_v32f32_to_v16i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -37351,7 +37517,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -37364,10 +37530,13 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_3
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v31, 1.0, v31
; GFX9-NEXT: v_add_f32_e32 v30, 1.0, v30
; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -37400,44 +37569,42 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB29_3: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_4:
-; GFX9-NEXT: s_branch .LBB29_2
;
; GFX11-LABEL: bitcast_v32f32_to_v16i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB29_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: .LBB29_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v31, 1.0, v31 :: v_dual_add_f32 v30, 1.0, v30
; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
@@ -37454,6 +37621,7 @@ define inreg <16 x i64> @bitcast_v32f32_to_v16i64_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB29_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37704,6 +37872,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -37724,7 +37893,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -37737,10 +37906,13 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB31_3
-; SI-NEXT: .LBB31_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB31_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB31_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30
; SI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
@@ -37773,16 +37945,15 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB31_3: ; %end
+; SI-NEXT: .LBB31_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v16i64_to_v32f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -37803,7 +37974,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -37816,10 +37987,13 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v30, vcc, 3, v30
; VI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
@@ -37852,16 +38026,15 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v16i64_to_v32f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -37882,7 +38055,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -37895,10 +38068,13 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_3
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v30, vcc, 3, v30
; GFX9-NEXT: v_addc_co_u32_e32 v31, vcc, 0, v31, vcc
; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, 3, v28
@@ -37931,44 +38107,42 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB31_3: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v16i64_to_v32f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB31_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: .LBB31_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v30, vcc_lo, v30, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v31, null, 0, v31, vcc_lo
@@ -38009,6 +38183,7 @@ define inreg <32 x float> @bitcast_v16i64_to_v32f32_scalar(<16 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB31_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -38235,6 +38410,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -38255,7 +38431,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -38268,10 +38444,13 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB33_4
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_3
-; SI-NEXT: .LBB33_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB33_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB33_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v31, 1.0, v31
; SI-NEXT: v_add_f32_e32 v30, 1.0, v30
; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -38304,16 +38483,15 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB33_3: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB33_4:
-; SI-NEXT: s_branch .LBB33_2
;
; VI-LABEL: bitcast_v32f32_to_v16f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -38334,7 +38512,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -38347,10 +38525,13 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB33_4
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_3
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v31, 1.0, v31
; VI-NEXT: v_add_f32_e32 v30, 1.0, v30
; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -38383,16 +38564,15 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB33_3: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_4:
-; VI-NEXT: s_branch .LBB33_2
;
; GFX9-LABEL: bitcast_v32f32_to_v16f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -38413,7 +38593,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -38426,10 +38606,13 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_3
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v31, 1.0, v31
; GFX9-NEXT: v_add_f32_e32 v30, 1.0, v30
; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
@@ -38462,44 +38645,42 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB33_3: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_4:
-; GFX9-NEXT: s_branch .LBB33_2
;
; GFX11-LABEL: bitcast_v32f32_to_v16f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB33_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: .LBB33_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v31, 1.0, v31 :: v_dual_add_f32 v30, 1.0, v30
; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
@@ -38516,6 +38697,7 @@ define inreg <16 x double> @bitcast_v32f32_to_v16f64_scalar(<32 x float> inreg %
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB33_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -38694,6 +38876,7 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -38724,13 +38907,16 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB35_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_3
-; SI-NEXT: .LBB35_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB35_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB35_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; SI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -38747,17 +38933,16 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB35_3: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: v_mov_b32_e32 v19, v33
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_4:
-; SI-NEXT: s_branch .LBB35_2
;
; VI-LABEL: bitcast_v16f64_to_v32f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -38788,13 +38973,16 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB35_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_3
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; VI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -38811,17 +38999,16 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB35_3: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: v_mov_b32_e32 v19, v33
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_4:
-; VI-NEXT: s_branch .LBB35_2
;
; GFX9-LABEL: bitcast_v16f64_to_v32f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -38852,13 +39039,16 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_3
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -38875,45 +39065,43 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB35_3: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: v_mov_b32_e32 v19, v33
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_4:
-; GFX9-NEXT: s_branch .LBB35_2
;
; GFX11-LABEL: bitcast_v16f64_to_v32f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB35_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: .LBB35_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
@@ -38930,6 +39118,7 @@ define inreg <32 x float> @bitcast_v16f64_to_v32f32_scalar(<16 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB35_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -43381,6 +43570,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -43398,7 +43588,7 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v56, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v47, s17
; SI-NEXT: v_mov_b32_e32 v44, s18
; SI-NEXT: v_mov_b32_e32 v42, s19
@@ -44434,8 +44624,6 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: .LBB37_4:
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; kill: killed $vgpr22
-; SI-NEXT: ; implicit-def: $vgpr22
-; SI-NEXT: ; kill: killed $vgpr22
; SI-NEXT: ; implicit-def: $vgpr41
; SI-NEXT: ; implicit-def: $vgpr54
; SI-NEXT: ; implicit-def: $vgpr50
@@ -44593,7 +44781,11 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; kill: killed $vgpr22
; SI-NEXT: ; implicit-def: $vgpr22
-; SI-NEXT: s_branch .LBB37_2
+; SI-NEXT: ; kill: killed $vgpr22
+; SI-NEXT: ; implicit-def: $vgpr22
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB37_2
+; SI-NEXT: s_branch .LBB37_3
;
; VI-LABEL: bitcast_v32f32_to_v128i8_scalar:
; VI: ; %bb.0:
@@ -44652,8 +44844,9 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s6, v15
; VI-NEXT: v_readfirstlane_b32 s7, v16
; VI-NEXT: v_readfirstlane_b32 s4, v17
-; VI-NEXT: s_and_b64 s[46:47], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v18
+; VI-NEXT: s_and_b64 s[46:47], vcc, exec
+; VI-NEXT: s_mov_b64 vcc, -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -45037,8 +45230,6 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: .LBB37_3:
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr71
; VI-NEXT: ; implicit-def: $sgpr69
; VI-NEXT: ; implicit-def: $sgpr70
@@ -45189,7 +45380,10 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB37_2
+; VI-NEXT: ; kill: killed $sgpr46
+; VI-NEXT: ; implicit-def: $sgpr46
+; VI-NEXT: s_andn2_b64 vcc, exec, vcc
+; VI-NEXT: s_cbranch_vccz .LBB37_2
; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: v_readlane_b32 s4, v62, 0
@@ -45927,8 +46121,9 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s6, v15
; GFX9-NEXT: v_readfirstlane_b32 s7, v16
; GFX9-NEXT: v_readfirstlane_b32 s4, v17
-; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v18
+; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
+; GFX9-NEXT: s_mov_b64 vcc, -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -46319,8 +46514,6 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: .LBB37_3:
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr81
; GFX9-NEXT: ; implicit-def: $sgpr71
; GFX9-NEXT: ; implicit-def: $sgpr80
@@ -46463,7 +46656,10 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB37_2
+; GFX9-NEXT: ; kill: killed $sgpr46
+; GFX9-NEXT: ; implicit-def: $sgpr46
+; GFX9-NEXT: s_andn2_b64 vcc, exec, vcc
+; GFX9-NEXT: s_cbranch_vccz .LBB37_2
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v52, s48
; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
@@ -47183,8 +47379,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_readfirstlane_b32 s13, v14
; GFX11-NEXT: v_writelane_b32 v75, s37, 5
; GFX11-NEXT: v_writelane_b32 v76, s101, 5
-; GFX11-NEXT: s_mov_b32 vcc_hi, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 vcc_lo, -1
; GFX11-NEXT: s_clause 0x12
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:72
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:68
@@ -47239,155 +47435,152 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s13, 24
-; GFX11-NEXT: s_lshr_b32 s36, s27, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 8
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
-; GFX11-NEXT: s_lshr_b32 s38, s27, 8
-; GFX11-NEXT: s_lshr_b32 s37, s26, 16
-; GFX11-NEXT: s_lshr_b32 s39, s26, 8
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
; GFX11-NEXT: v_writelane_b32 v78, s42, 7
-; GFX11-NEXT: s_lshr_b32 s42, s13, 8
-; GFX11-NEXT: s_lshr_b32 s48, s25, 24
-; GFX11-NEXT: s_lshr_b32 s49, s25, 16
-; GFX11-NEXT: s_lshr_b32 s51, s25, 8
+; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s34, s27, 16
+; GFX11-NEXT: s_lshr_b32 s36, s27, 8
+; GFX11-NEXT: s_lshr_b32 s35, s26, 16
; GFX11-NEXT: v_writelane_b32 v78, s42, 6
-; GFX11-NEXT: s_lshr_b32 s42, s12, 16
-; GFX11-NEXT: s_lshr_b32 s50, s24, 16
-; GFX11-NEXT: s_lshr_b32 s52, s24, 8
-; GFX11-NEXT: s_lshr_b32 s53, s23, 24
+; GFX11-NEXT: s_lshr_b32 s42, s13, 8
+; GFX11-NEXT: s_lshr_b32 s37, s26, 8
+; GFX11-NEXT: s_lshr_b32 s38, s25, 24
+; GFX11-NEXT: s_lshr_b32 s39, s25, 16
; GFX11-NEXT: v_writelane_b32 v78, s42, 5
-; GFX11-NEXT: s_lshr_b32 s42, s12, 8
-; GFX11-NEXT: s_lshr_b32 s54, s23, 16
-; GFX11-NEXT: s_lshr_b32 s64, s23, 8
-; GFX11-NEXT: s_lshr_b32 s55, s22, 16
+; GFX11-NEXT: s_lshr_b32 s42, s12, 16
+; GFX11-NEXT: s_lshr_b32 s49, s25, 8
+; GFX11-NEXT: s_lshr_b32 s48, s24, 16
+; GFX11-NEXT: s_lshr_b32 s50, s24, 8
; GFX11-NEXT: v_writelane_b32 v78, s42, 4
-; GFX11-NEXT: s_lshr_b32 s42, s11, 24
-; GFX11-NEXT: s_lshr_b32 s65, s22, 8
-; GFX11-NEXT: s_lshr_b32 s66, s21, 24
-; GFX11-NEXT: s_lshr_b32 s67, s21, 16
+; GFX11-NEXT: s_lshr_b32 s42, s12, 8
+; GFX11-NEXT: s_lshr_b32 s51, s23, 24
+; GFX11-NEXT: s_lshr_b32 s52, s23, 16
+; GFX11-NEXT: s_lshr_b32 s54, s23, 8
; GFX11-NEXT: v_writelane_b32 v78, s42, 3
-; GFX11-NEXT: s_lshr_b32 s42, s11, 16
-; GFX11-NEXT: s_lshr_b32 s69, s21, 8
-; GFX11-NEXT: s_lshr_b32 s68, s20, 16
-; GFX11-NEXT: s_lshr_b32 s70, s20, 8
+; GFX11-NEXT: s_lshr_b32 s42, s11, 24
+; GFX11-NEXT: s_lshr_b32 s53, s22, 16
+; GFX11-NEXT: s_lshr_b32 s55, s22, 8
+; GFX11-NEXT: s_lshr_b32 s64, s21, 24
; GFX11-NEXT: v_writelane_b32 v78, s42, 2
-; GFX11-NEXT: s_lshr_b32 s42, s11, 8
-; GFX11-NEXT: s_lshr_b32 s71, s19, 24
-; GFX11-NEXT: s_lshr_b32 s80, s19, 16
-; GFX11-NEXT: s_lshr_b32 s82, s19, 8
+; GFX11-NEXT: s_lshr_b32 s42, s11, 16
+; GFX11-NEXT: s_lshr_b32 s65, s21, 16
+; GFX11-NEXT: s_lshr_b32 s67, s21, 8
+; GFX11-NEXT: s_lshr_b32 s66, s20, 16
; GFX11-NEXT: v_writelane_b32 v78, s42, 1
-; GFX11-NEXT: s_lshr_b32 s42, s10, 16
-; GFX11-NEXT: s_lshr_b32 s81, s18, 16
-; GFX11-NEXT: s_lshr_b32 s83, s18, 8
-; GFX11-NEXT: s_lshr_b32 s84, s17, 24
+; GFX11-NEXT: s_lshr_b32 s42, s11, 8
+; GFX11-NEXT: s_lshr_b32 s68, s20, 8
+; GFX11-NEXT: s_lshr_b32 s69, s19, 24
+; GFX11-NEXT: s_lshr_b32 s70, s19, 16
; GFX11-NEXT: v_writelane_b32 v78, s42, 0
-; GFX11-NEXT: s_lshr_b32 s42, s10, 8
-; GFX11-NEXT: s_lshr_b32 s85, s17, 16
+; GFX11-NEXT: s_lshr_b32 s42, s10, 16
+; GFX11-NEXT: s_lshr_b32 s80, s19, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 31
-; GFX11-NEXT: s_lshr_b32 s42, s9, 24
-; GFX11-NEXT: s_lshr_b32 s87, s17, 8
-; GFX11-NEXT: s_lshr_b32 s86, s16, 16
-; GFX11-NEXT: s_lshr_b32 s96, s16, 8
+; GFX11-NEXT: s_lshr_b32 s42, s10, 8
+; GFX11-NEXT: s_lshr_b32 s71, s18, 16
+; GFX11-NEXT: s_lshr_b32 s81, s18, 8
+; GFX11-NEXT: s_lshr_b32 s82, s17, 24
; GFX11-NEXT: v_writelane_b32 v77, s42, 30
-; GFX11-NEXT: s_lshr_b32 s42, s9, 16
-; GFX11-NEXT: s_lshr_b32 s97, s3, 24
-; GFX11-NEXT: s_lshr_b32 s98, s3, 16
-; GFX11-NEXT: s_lshr_b32 s100, s3, 8
+; GFX11-NEXT: s_lshr_b32 s42, s9, 24
+; GFX11-NEXT: s_lshr_b32 s83, s17, 16
+; GFX11-NEXT: s_lshr_b32 s85, s17, 8
+; GFX11-NEXT: s_lshr_b32 s84, s16, 16
; GFX11-NEXT: v_writelane_b32 v77, s42, 29
-; GFX11-NEXT: s_lshr_b32 s42, s9, 8
-; GFX11-NEXT: s_lshr_b32 s99, s2, 16
-; GFX11-NEXT: s_lshr_b32 s101, s2, 8
-; GFX11-NEXT: s_lshr_b32 s102, s1, 24
+; GFX11-NEXT: s_lshr_b32 s42, s9, 16
+; GFX11-NEXT: s_lshr_b32 s86, s16, 8
+; GFX11-NEXT: s_lshr_b32 s87, s3, 24
+; GFX11-NEXT: s_lshr_b32 s96, s3, 16
; GFX11-NEXT: v_writelane_b32 v77, s42, 28
-; GFX11-NEXT: s_lshr_b32 s42, s8, 16
-; GFX11-NEXT: s_lshr_b32 s103, s1, 16
-; GFX11-NEXT: s_lshr_b32 s34, s1, 8
-; GFX11-NEXT: s_lshr_b32 s104, s0, 16
+; GFX11-NEXT: s_lshr_b32 s42, s9, 8
+; GFX11-NEXT: s_lshr_b32 s98, s3, 8
+; GFX11-NEXT: s_lshr_b32 s97, s2, 16
+; GFX11-NEXT: s_lshr_b32 s99, s2, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 27
-; GFX11-NEXT: s_lshr_b32 s42, s8, 8
-; GFX11-NEXT: s_lshr_b32 s35, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[12:13], 24
-; GFX11-NEXT: s_lshr_b64 s[72:73], s[10:11], 24
+; GFX11-NEXT: s_lshr_b32 s42, s8, 16
+; GFX11-NEXT: s_lshr_b32 s100, s1, 24
+; GFX11-NEXT: s_lshr_b32 s101, s1, 16
+; GFX11-NEXT: s_lshr_b32 s103, s1, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 26
-; GFX11-NEXT: s_lshr_b32 s42, s7, 24
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[8:9], 24
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[6:7], 24
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[4:5], 24
+; GFX11-NEXT: s_lshr_b32 s42, s8, 8
+; GFX11-NEXT: s_lshr_b32 s102, s0, 16
+; GFX11-NEXT: s_lshr_b32 s104, s0, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 25
-; GFX11-NEXT: s_lshr_b32 s42, s7, 16
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[14:15], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[40:41], 24
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[28:29], 24
+; GFX11-NEXT: s_lshr_b32 s42, s7, 24
+; GFX11-NEXT: s_lshr_b64 s[30:31], s[12:13], 24
+; GFX11-NEXT: s_lshr_b64 s[94:95], s[10:11], 24
+; GFX11-NEXT: s_lshr_b64 s[92:93], s[8:9], 24
; GFX11-NEXT: v_writelane_b32 v77, s42, 24
-; GFX11-NEXT: s_lshr_b32 s42, s7, 8
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[26:27], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[24:25], 24
-; GFX11-NEXT: s_lshr_b64 s[60:61], s[22:23], 24
+; GFX11-NEXT: s_lshr_b32 s42, s7, 16
+; GFX11-NEXT: s_lshr_b64 s[90:91], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[88:89], s[4:5], 24
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[14:15], 24
; GFX11-NEXT: v_writelane_b32 v77, s42, 23
+; GFX11-NEXT: s_lshr_b32 s42, s7, 8
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[40:41], 24
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[28:29], 24
+; GFX11-NEXT: s_lshr_b64 s[72:73], s[26:27], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 22
; GFX11-NEXT: s_lshr_b32 s42, s6, 16
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[24:25], 24
+; GFX11-NEXT: s_lshr_b64 s[60:61], s[22:23], 24
; GFX11-NEXT: s_lshr_b64 s[58:59], s[20:21], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 21
+; GFX11-NEXT: s_lshr_b32 s42, s6, 8
; GFX11-NEXT: s_lshr_b64 s[56:57], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[46:47], s[16:17], 24
-; GFX11-NEXT: v_writelane_b32 v77, s42, 22
-; GFX11-NEXT: s_lshr_b32 s42, s6, 8
; GFX11-NEXT: s_lshr_b64 s[44:45], s[2:3], 24
-; GFX11-NEXT: v_writelane_b32 v77, s42, 21
+; GFX11-NEXT: v_writelane_b32 v77, s42, 20
; GFX11-NEXT: s_lshr_b32 s42, s5, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 20
-; GFX11-NEXT: s_lshr_b32 s42, s5, 16
; GFX11-NEXT: v_writelane_b32 v77, s42, 19
+; GFX11-NEXT: s_lshr_b32 s42, s5, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 18
; GFX11-NEXT: s_lshr_b32 s42, s5, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 18
-; GFX11-NEXT: s_lshr_b32 s42, s4, 16
; GFX11-NEXT: v_writelane_b32 v77, s42, 17
+; GFX11-NEXT: s_lshr_b32 s42, s4, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 16
; GFX11-NEXT: s_lshr_b32 s42, s4, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 16
-; GFX11-NEXT: s_lshr_b32 s42, s15, 24
; GFX11-NEXT: v_writelane_b32 v77, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s15, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 14
; GFX11-NEXT: s_lshr_b32 s42, s15, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 14
-; GFX11-NEXT: s_lshr_b32 s42, s15, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s15, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 12
; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 12
-; GFX11-NEXT: s_lshr_b32 s42, s14, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 11
+; GFX11-NEXT: s_lshr_b32 s42, s14, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 10
; GFX11-NEXT: s_lshr_b32 s42, s41, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 10
-; GFX11-NEXT: s_lshr_b32 s42, s41, 16
; GFX11-NEXT: v_writelane_b32 v77, s42, 9
+; GFX11-NEXT: s_lshr_b32 s42, s41, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 8
; GFX11-NEXT: s_lshr_b32 s42, s41, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 8
-; GFX11-NEXT: s_lshr_b32 s42, s40, 16
; GFX11-NEXT: v_writelane_b32 v77, s42, 7
+; GFX11-NEXT: s_lshr_b32 s42, s40, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 6
; GFX11-NEXT: s_lshr_b32 s42, s40, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 6
-; GFX11-NEXT: s_lshr_b32 s42, s29, 24
; GFX11-NEXT: v_writelane_b32 v77, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s29, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 4
; GFX11-NEXT: s_lshr_b32 s42, s29, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 4
-; GFX11-NEXT: s_lshr_b32 s42, s29, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s29, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 2
; GFX11-NEXT: s_lshr_b32 s42, s28, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v77, s42, 2
-; GFX11-NEXT: s_lshr_b32 s42, s28, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 1
-; GFX11-NEXT: s_lshr_b32 s42, s27, 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_lshr_b32 s42, s28, 8
; GFX11-NEXT: v_writelane_b32 v77, s42, 0
; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: s_cbranch_execnz .LBB37_4
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v22, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v21, s26, 1.0
@@ -47410,8 +47603,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v48, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v18, s29, 1.0
; GFX11-NEXT: v_add_f32_e64 v17, s28, 1.0
-; GFX11-NEXT: v_add_f32_e64 v14, s41, 1.0
-; GFX11-NEXT: v_add_f32_e64 v13, s40, 1.0
+; GFX11-NEXT: v_add_f32_e64 v16, s41, 1.0
+; GFX11-NEXT: v_add_f32_e64 v15, s40, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s15, 1.0
; GFX11-NEXT: v_add_f32_e64 v11, s14, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s5, 1.0
@@ -47428,16 +47621,16 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b64 v[68:69], 24, v[34:35]
; GFX11-NEXT: v_lshrrev_b64 v[25:26], 24, v[5:6]
; GFX11-NEXT: v_lshrrev_b64 v[69:70], 24, v[36:37]
-; GFX11-NEXT: v_lshrrev_b64 v[15:16], 24, v[1:2]
+; GFX11-NEXT: v_lshrrev_b64 v[13:14], 24, v[1:2]
; GFX11-NEXT: v_lshrrev_b64 v[19:20], 24, v[3:4]
; GFX11-NEXT: v_lshrrev_b64 v[26:27], 24, v[7:8]
; GFX11-NEXT: v_lshrrev_b64 v[32:33], 24, v[9:10]
; GFX11-NEXT: v_lshrrev_b64 v[38:39], 24, v[11:12]
-; GFX11-NEXT: v_lshrrev_b64 v[50:51], 24, v[13:14]
+; GFX11-NEXT: v_lshrrev_b64 v[50:51], 24, v[15:16]
; GFX11-NEXT: v_lshrrev_b64 v[54:55], 24, v[17:18]
; GFX11-NEXT: v_lshrrev_b64 v[70:71], 24, v[48:49]
; GFX11-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53]
-; GFX11-NEXT: v_lshrrev_b32_e32 v16, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v14, 24, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v20, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v27, 8, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v1
@@ -47467,18 +47660,18 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v118, 8, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v119, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v128, 8, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v130, 24, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v129, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v131, 8, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v132, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v133, 8, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v130, 24, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v129, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v131, 8, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v132, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v133, 8, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v135, 24, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v134, 16, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v144, 8, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v145, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v146, 8, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v148, 24, v22
-; GFX11-NEXT: v_lshrrev_b32_e32 v147, 16, v22
+; GFX11-NEXT: v_lshrrev_b32_e32 v147, 24, v22
+; GFX11-NEXT: v_lshrrev_b32_e32 v148, 16, v22
; GFX11-NEXT: v_lshrrev_b32_e32 v149, 8, v22
; GFX11-NEXT: v_lshrrev_b32_e32 v150, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v151, 8, v21
@@ -47521,63 +47714,62 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: .LBB37_3:
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr35
; GFX11-NEXT: ; implicit-def: $sgpr104
+; GFX11-NEXT: ; implicit-def: $sgpr102
; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr34
; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr102
; GFX11-NEXT: ; implicit-def: $sgpr101
+; GFX11-NEXT: ; implicit-def: $sgpr100
; GFX11-NEXT: ; implicit-def: $sgpr99
+; GFX11-NEXT: ; implicit-def: $sgpr97
; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr100
; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr97
; GFX11-NEXT: ; implicit-def: $sgpr96
+; GFX11-NEXT: ; implicit-def: $sgpr87
; GFX11-NEXT: ; implicit-def: $sgpr86
+; GFX11-NEXT: ; implicit-def: $sgpr84
; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr87
; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr84
; GFX11-NEXT: ; implicit-def: $sgpr83
+; GFX11-NEXT: ; implicit-def: $sgpr82
; GFX11-NEXT: ; implicit-def: $sgpr81
+; GFX11-NEXT: ; implicit-def: $sgpr71
; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr82
; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr71
; GFX11-NEXT: ; implicit-def: $sgpr70
+; GFX11-NEXT: ; implicit-def: $sgpr69
; GFX11-NEXT: ; implicit-def: $sgpr68
+; GFX11-NEXT: ; implicit-def: $sgpr66
; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr69
; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr66
; GFX11-NEXT: ; implicit-def: $sgpr65
+; GFX11-NEXT: ; implicit-def: $sgpr64
; GFX11-NEXT: ; implicit-def: $sgpr55
+; GFX11-NEXT: ; implicit-def: $sgpr53
; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr64
; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr53
; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr50
; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr49
+; GFX11-NEXT: ; implicit-def: $sgpr50
; GFX11-NEXT: ; implicit-def: $sgpr48
+; GFX11-NEXT: ; implicit-def: $sgpr49
; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr37
+; GFX11-NEXT: ; implicit-def: $sgpr35
; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr72
+; GFX11-NEXT: ; implicit-def: $sgpr34
+; GFX11-NEXT: ; implicit-def: $vcc_hi
; GFX11-NEXT: ; implicit-def: $sgpr62
+; GFX11-NEXT: ; implicit-def: $sgpr72
+; GFX11-NEXT: ; implicit-def: $sgpr74
+; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr78
+; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr90
+; GFX11-NEXT: ; implicit-def: $sgpr92
+; GFX11-NEXT: ; implicit-def: $sgpr94
+; GFX11-NEXT: ; implicit-def: $sgpr30
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr43
@@ -47656,155 +47848,155 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: s_branch .LBB37_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
+; GFX11-NEXT: s_cbranch_vccz .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v52, s0 :: v_dual_mov_b32 v53, s1
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_readlane_b32 s0, v77, 0
-; GFX11-NEXT: v_dual_mov_b32 v147, s36 :: v_dual_mov_b32 v48, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s3 :: v_dual_mov_b32 v36, s16
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v37, s17 :: v_dual_mov_b32 v148, s0
+; GFX11-NEXT: v_dual_mov_b32 v48, s2 :: v_dual_mov_b32 v49, s3
+; GFX11-NEXT: v_dual_mov_b32 v36, s16 :: v_dual_mov_b32 v37, s17
+; GFX11-NEXT: v_mov_b32_e32 v146, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 1
; GFX11-NEXT: v_dual_mov_b32 v34, s18 :: v_dual_mov_b32 v35, s19
; GFX11-NEXT: v_dual_mov_b32 v30, s20 :: v_dual_mov_b32 v31, s21
-; GFX11-NEXT: v_mov_b32_e32 v146, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v145, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 2
; GFX11-NEXT: v_dual_mov_b32 v28, s22 :: v_dual_mov_b32 v29, s23
; GFX11-NEXT: v_dual_mov_b32 v23, s24 :: v_dual_mov_b32 v24, s25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v145, s0
+; GFX11-NEXT: v_mov_b32_e32 v144, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 3
; GFX11-NEXT: v_dual_mov_b32 v21, s26 :: v_dual_mov_b32 v22, s27
; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v18, s29
-; GFX11-NEXT: v_mov_b32_e32 v144, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 4
-; GFX11-NEXT: v_dual_mov_b32 v13, s40 :: v_dual_mov_b32 v14, s41
-; GFX11-NEXT: v_dual_mov_b32 v11, s14 :: v_dual_mov_b32 v12, s15
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v134, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 4
+; GFX11-NEXT: v_dual_mov_b32 v15, s40 :: v_dual_mov_b32 v16, s41
+; GFX11-NEXT: v_dual_mov_b32 v11, s14 :: v_dual_mov_b32 v12, s15
+; GFX11-NEXT: v_mov_b32_e32 v135, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 5
; GFX11-NEXT: v_dual_mov_b32 v9, s4 :: v_dual_mov_b32 v10, s5
; GFX11-NEXT: v_dual_mov_b32 v7, s6 :: v_dual_mov_b32 v8, s7
-; GFX11-NEXT: v_mov_b32_e32 v135, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v133, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 6
; GFX11-NEXT: v_dual_mov_b32 v5, s8 :: v_dual_mov_b32 v6, s9
; GFX11-NEXT: v_dual_mov_b32 v3, s10 :: v_dual_mov_b32 v4, s11
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_mov_b32_e32 v133, s0
+; GFX11-NEXT: v_mov_b32_e32 v132, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_dual_mov_b32 v1, s12 :: v_dual_mov_b32 v2, s13
-; GFX11-NEXT: v_dual_mov_b32 v74, s35 :: v_dual_mov_b32 v73, s104
-; GFX11-NEXT: v_mov_b32_e32 v132, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 8
-; GFX11-NEXT: v_dual_mov_b32 v72, s34 :: v_dual_mov_b32 v63, s103
-; GFX11-NEXT: v_dual_mov_b32 v62, s102 :: v_dual_mov_b32 v61, s101
+; GFX11-NEXT: v_dual_mov_b32 v74, s104 :: v_dual_mov_b32 v73, s102
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v131, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 9
-; GFX11-NEXT: v_dual_mov_b32 v60, s99 :: v_dual_mov_b32 v59, s100
-; GFX11-NEXT: v_dual_mov_b32 v58, s98 :: v_dual_mov_b32 v57, s97
+; GFX11-NEXT: v_readlane_b32 s0, v77, 8
+; GFX11-NEXT: v_dual_mov_b32 v72, s103 :: v_dual_mov_b32 v63, s101
+; GFX11-NEXT: v_dual_mov_b32 v62, s100 :: v_dual_mov_b32 v61, s99
; GFX11-NEXT: v_mov_b32_e32 v129, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 10
-; GFX11-NEXT: v_dual_mov_b32 v56, s96 :: v_dual_mov_b32 v47, s86
-; GFX11-NEXT: v_dual_mov_b32 v46, s87 :: v_dual_mov_b32 v45, s85
+; GFX11-NEXT: v_readlane_b32 s0, v77, 9
+; GFX11-NEXT: v_dual_mov_b32 v60, s97 :: v_dual_mov_b32 v59, s98
+; GFX11-NEXT: v_dual_mov_b32 v58, s96 :: v_dual_mov_b32 v57, s87
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v130, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 11
-; GFX11-NEXT: v_dual_mov_b32 v44, s84 :: v_dual_mov_b32 v43, s83
-; GFX11-NEXT: v_dual_mov_b32 v42, s81 :: v_dual_mov_b32 v41, s82
+; GFX11-NEXT: v_readlane_b32 s0, v77, 10
+; GFX11-NEXT: v_dual_mov_b32 v56, s86 :: v_dual_mov_b32 v47, s84
+; GFX11-NEXT: v_dual_mov_b32 v46, s85 :: v_dual_mov_b32 v45, s83
; GFX11-NEXT: v_mov_b32_e32 v128, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 12
-; GFX11-NEXT: v_dual_mov_b32 v40, s80 :: v_dual_mov_b32 v183, s71
-; GFX11-NEXT: v_dual_mov_b32 v182, s70 :: v_dual_mov_b32 v181, s68
+; GFX11-NEXT: v_readlane_b32 s0, v77, 11
+; GFX11-NEXT: v_dual_mov_b32 v44, s82 :: v_dual_mov_b32 v43, s81
+; GFX11-NEXT: v_dual_mov_b32 v42, s71 :: v_dual_mov_b32 v41, s80
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v119, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 13
-; GFX11-NEXT: v_dual_mov_b32 v180, s69 :: v_dual_mov_b32 v179, s67
-; GFX11-NEXT: v_dual_mov_b32 v178, s66 :: v_dual_mov_b32 v177, s65
+; GFX11-NEXT: v_readlane_b32 s0, v77, 12
+; GFX11-NEXT: v_dual_mov_b32 v40, s70 :: v_dual_mov_b32 v183, s69
+; GFX11-NEXT: v_dual_mov_b32 v182, s68 :: v_dual_mov_b32 v181, s66
; GFX11-NEXT: v_mov_b32_e32 v118, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 14
-; GFX11-NEXT: v_dual_mov_b32 v176, s55 :: v_dual_mov_b32 v167, s64
-; GFX11-NEXT: v_dual_mov_b32 v166, s54 :: v_dual_mov_b32 v165, s53
+; GFX11-NEXT: v_readlane_b32 s0, v77, 13
+; GFX11-NEXT: v_dual_mov_b32 v180, s67 :: v_dual_mov_b32 v179, s65
+; GFX11-NEXT: v_dual_mov_b32 v178, s64 :: v_dual_mov_b32 v177, s55
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v116, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 15
-; GFX11-NEXT: v_dual_mov_b32 v164, s52 :: v_dual_mov_b32 v163, s50
-; GFX11-NEXT: v_dual_mov_b32 v162, s51 :: v_dual_mov_b32 v161, s49
+; GFX11-NEXT: v_readlane_b32 s0, v77, 14
+; GFX11-NEXT: v_dual_mov_b32 v176, s53 :: v_dual_mov_b32 v167, s54
+; GFX11-NEXT: v_dual_mov_b32 v166, s52 :: v_dual_mov_b32 v165, s51
; GFX11-NEXT: v_mov_b32_e32 v117, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 16
-; GFX11-NEXT: v_dual_mov_b32 v160, s48 :: v_dual_mov_b32 v151, s39
-; GFX11-NEXT: v_dual_mov_b32 v150, s37 :: v_dual_mov_b32 v149, s38
+; GFX11-NEXT: v_readlane_b32 s0, v77, 15
+; GFX11-NEXT: v_dual_mov_b32 v164, s50 :: v_dual_mov_b32 v163, s48
+; GFX11-NEXT: v_dual_mov_b32 v162, s49 :: v_dual_mov_b32 v161, s39
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v115, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 17
-; GFX11-NEXT: v_dual_mov_b32 v15, s62 :: v_dual_mov_b32 v38, s88
-; GFX11-NEXT: v_dual_mov_b32 v19, s72 :: v_dual_mov_b32 v50, s90
+; GFX11-NEXT: v_readlane_b32 s0, v77, 16
+; GFX11-NEXT: v_dual_mov_b32 v160, s38 :: v_dual_mov_b32 v151, s37
+; GFX11-NEXT: v_dual_mov_b32 v150, s35 :: v_dual_mov_b32 v149, s36
; GFX11-NEXT: v_mov_b32_e32 v114, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 18
-; GFX11-NEXT: v_dual_mov_b32 v25, s74 :: v_dual_mov_b32 v54, s92
-; GFX11-NEXT: v_dual_mov_b32 v64, s94 :: v_dual_mov_b32 v65, s30
+; GFX11-NEXT: v_readlane_b32 s0, v77, 17
+; GFX11-NEXT: v_dual_mov_b32 v148, s34 :: v_dual_mov_b32 v147, vcc_hi
+; GFX11-NEXT: v_dual_mov_b32 v13, s30 :: v_dual_mov_b32 v38, s78
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v113, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 18
+; GFX11-NEXT: v_dual_mov_b32 v19, s94 :: v_dual_mov_b32 v50, s76
+; GFX11-NEXT: v_dual_mov_b32 v25, s92 :: v_dual_mov_b32 v54, s74
+; GFX11-NEXT: v_mov_b32_e32 v103, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 19
+; GFX11-NEXT: v_dual_mov_b32 v64, s72 :: v_dual_mov_b32 v65, s62
; GFX11-NEXT: v_dual_mov_b32 v66, s60 :: v_dual_mov_b32 v67, s58
-; GFX11-NEXT: v_dual_mov_b32 v68, s56 :: v_dual_mov_b32 v69, s46
-; GFX11-NEXT: v_mov_b32_e32 v103, s0
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_mov_b32_e32 v112, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 20
+; GFX11-NEXT: v_dual_mov_b32 v68, s56 :: v_dual_mov_b32 v69, s46
; GFX11-NEXT: v_mov_b32_e32 v70, s44
; GFX11-NEXT: v_mov_b32_e32 v80, s42
-; GFX11-NEXT: v_mov_b32_e32 v26, s76
-; GFX11-NEXT: v_mov_b32_e32 v32, s78
-; GFX11-NEXT: v_mov_b32_e32 v112, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 21
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v102, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 21
+; GFX11-NEXT: v_mov_b32_e32 v26, s90
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_dual_mov_b32 v32, s88 :: v_dual_mov_b32 v101, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 22
-; GFX11-NEXT: v_mov_b32_e32 v101, s0
+; GFX11-NEXT: v_mov_b32_e32 v100, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 23
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v100, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 24
; GFX11-NEXT: v_mov_b32_e32 v98, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 24
+; GFX11-NEXT: v_mov_b32_e32 v99, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 25
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v99, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 26
; GFX11-NEXT: v_mov_b32_e32 v97, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 26
+; GFX11-NEXT: v_mov_b32_e32 v96, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 27
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v96, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 28
; GFX11-NEXT: v_mov_b32_e32 v87, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 28
+; GFX11-NEXT: v_mov_b32_e32 v85, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 29
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v85, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 30
; GFX11-NEXT: v_mov_b32_e32 v86, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 30
+; GFX11-NEXT: v_mov_b32_e32 v84, s0
; GFX11-NEXT: v_readlane_b32 s0, v77, 31
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v84, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 0
; GFX11-NEXT: v_mov_b32_e32 v83, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 0
+; GFX11-NEXT: v_mov_b32_e32 v82, s0
; GFX11-NEXT: v_readlane_b32 s0, v78, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v82, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 2
; GFX11-NEXT: v_mov_b32_e32 v51, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 2
+; GFX11-NEXT: v_mov_b32_e32 v55, s0
; GFX11-NEXT: v_readlane_b32 s0, v78, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v55, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 4
; GFX11-NEXT: v_mov_b32_e32 v39, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 4
+; GFX11-NEXT: v_mov_b32_e32 v33, s0
; GFX11-NEXT: v_readlane_b32 s0, v78, 5
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v33, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 6
; GFX11-NEXT: v_mov_b32_e32 v27, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_readlane_b32 s0, v78, 6
; GFX11-NEXT: v_mov_b32_e32 v20, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 8
-; GFX11-NEXT: v_mov_b32_e32 v16, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v14, s0
; GFX11-NEXT: .LBB37_5: ; %end
; GFX11-NEXT: v_lshlrev_b32_e32 v71, 8, v80
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
@@ -47942,8 +48134,8 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_lshlrev_b32_e32 v53, 8, v146
; GFX11-NEXT: v_and_b32_e32 v64, 0xff, v145
; GFX11-NEXT: v_lshlrev_b32_e32 v54, 8, v54
-; GFX11-NEXT: v_and_b32_e32 v49, 0xff, v147
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 8, v148
+; GFX11-NEXT: v_and_b32_e32 v49, 0xff, v148
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 8, v147
; GFX11-NEXT: v_or_b32_e32 v21, v21, v37
; GFX11-NEXT: v_or_b32_e32 v22, v22, v48
; GFX11-NEXT: v_or_b32_e32 v17, v17, v53
@@ -47969,18 +48161,18 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v48, 0xff, v132
; GFX11-NEXT: v_lshlrev_b32_e32 v49, 8, v50
; GFX11-NEXT: v_or_b32_e32 v24, v52, v37
-; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v133
; GFX11-NEXT: v_or_b32_e32 v17, v17, v18
; GFX11-NEXT: v_or_b32_e32 v18, v35, v36
; GFX11-NEXT: v_or_b32_e32 v35, v48, v49
-; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
+; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v16
; GFX11-NEXT: v_lshlrev_b32_e32 v50, 8, v131
-; GFX11-NEXT: v_or_b32_e32 v13, v13, v37
+; GFX11-NEXT: v_or_b32_e32 v15, v15, v37
; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v130
; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v35
; GFX11-NEXT: v_and_b32_e32 v35, 0xff, v129
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v50
+; GFX11-NEXT: v_or_b32_e32 v16, v16, v50
; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v128
; GFX11-NEXT: v_and_b32_e32 v49, 0xff, v119
@@ -47990,119 +48182,119 @@ define inreg <128 x i8> @bitcast_v32f32_to_v128i8_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v52, 0xff, v116
; GFX11-NEXT: v_lshlrev_b32_e32 v53, 8, v117
; GFX11-NEXT: v_or_b32_e32 v35, v35, v37
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-NEXT: v_or_b32_e32 v11, v11, v48
; GFX11-NEXT: v_or_b32_e32 v37, v49, v38
; GFX11-NEXT: v_or_b32_e32 v12, v12, v50
; GFX11-NEXT: v_or_b32_e32 v38, v52, v53
; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v35
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v37
; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX11-NEXT: v_lshlrev_b32_e32 v38, 16, v38
+; GFX11-NEXT: v_or_b32_e32 v37, v16, v48
+; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
+; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v102
; GFX11-NEXT: v_or_b32_e32 v35, v17, v18
-; GFX11-NEXT: v_or_b32_e32 v36, v13, v36
-; GFX11-NEXT: v_or_b32_e32 v37, v14, v48
+; GFX11-NEXT: v_or_b32_e32 v36, v15, v36
+; GFX11-NEXT: v_or_b32_e32 v15, v11, v49
+; GFX11-NEXT: v_or_b32_e32 v16, v12, v38
; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v115
-; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v114
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v115
+; GFX11-NEXT: v_and_b32_e32 v12, 0xff, v114
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v32
; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v113
-; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v102
-; GFX11-NEXT: v_or_b32_e32 v11, v11, v49
-; GFX11-NEXT: v_or_b32_e32 v12, v12, v38
; GFX11-NEXT: v_and_b32_e32 v32, 0xff, v103
; GFX11-NEXT: v_lshlrev_b32_e32 v38, 8, v112
-; GFX11-NEXT: v_or_b32_e32 v9, v9, v13
-; GFX11-NEXT: v_or_b32_e32 v13, v14, v17
-; GFX11-NEXT: v_or_b32_e32 v10, v10, v18
; GFX11-NEXT: v_or_b32_e32 v7, v7, v48
-; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v101
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v26
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v97
; GFX11-NEXT: v_and_b32_e32 v49, 0xff, v96
; GFX11-NEXT: v_lshlrev_b32_e32 v25, 8, v25
-; GFX11-NEXT: v_or_b32_e32 v14, v32, v38
-; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v26, 8, v100
+; GFX11-NEXT: v_or_b32_e32 v9, v9, v11
+; GFX11-NEXT: v_or_b32_e32 v11, v12, v17
+; GFX11-NEXT: v_or_b32_e32 v10, v10, v18
+; GFX11-NEXT: v_or_b32_e32 v12, v32, v38
+; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v101
+; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v26
; GFX11-NEXT: v_and_b32_e32 v32, 0xff, v98
; GFX11-NEXT: v_lshlrev_b32_e32 v38, 8, v99
-; GFX11-NEXT: v_or_b32_e32 v17, v17, v18
; GFX11-NEXT: v_or_b32_e32 v5, v5, v48
; GFX11-NEXT: v_or_b32_e32 v25, v49, v25
; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_or_b32_e32 v8, v8, v26
+; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v12
+; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v26, 8, v100
+; GFX11-NEXT: v_or_b32_e32 v17, v17, v18
; GFX11-NEXT: v_or_b32_e32 v18, v32, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; GFX11-NEXT: v_or_b32_e32 v13, v9, v13
-; GFX11-NEXT: v_or_b32_e32 v14, v10, v14
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v17
+; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v26
+; GFX11-NEXT: v_lshlrev_b32_e32 v26, 16, v17
+; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v18
+; GFX11-NEXT: v_or_b32_e32 v17, v9, v11
+; GFX11-NEXT: v_or_b32_e32 v18, v10, v12
; GFX11-NEXT: v_or_b32_e32 v9, v5, v25
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 8, v87
; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v85
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v86
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v86
; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v83
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX11-NEXT: v_or_b32_e32 v8, v8, v18
+; GFX11-NEXT: v_or_b32_e32 v7, v7, v26
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v84
+; GFX11-NEXT: v_lshlrev_b32_e32 v12, 8, v84
; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
; GFX11-NEXT: v_lshlrev_b32_e32 v26, 8, v82
; GFX11-NEXT: v_or_b32_e32 v5, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v6, v10, v17
+; GFX11-NEXT: v_or_b32_e32 v6, v10, v11
; GFX11-NEXT: v_or_b32_e32 v10, v25, v19
-; GFX11-NEXT: v_or_b32_e32 v3, v3, v18
+; GFX11-NEXT: v_or_b32_e32 v3, v3, v12
; GFX11-NEXT: v_or_b32_e32 v4, v4, v26
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v55
+; GFX11-NEXT: v_lshlrev_b32_e32 v12, 8, v55
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v10
; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v51
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v39
; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v33
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v26, 8, v27
; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v16
-; GFX11-NEXT: v_or_b32_e32 v10, v10, v18
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v14
+; GFX11-NEXT: v_or_b32_e32 v10, v10, v12
; GFX11-NEXT: v_or_b32_e32 v1, v1, v19
-; GFX11-NEXT: v_or_b32_e32 v15, v25, v15
+; GFX11-NEXT: v_or_b32_e32 v12, v25, v13
; GFX11-NEXT: v_or_b32_e32 v2, v2, v26
-; GFX11-NEXT: v_or_b32_e32 v16, v20, v16
+; GFX11-NEXT: v_or_b32_e32 v13, v20, v14
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v10
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v10
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v1
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v15
+; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v32
; GFX11-NEXT: v_or_b32_e32 v10, v5, v6
-; GFX11-NEXT: v_or_b32_e32 v1, v3, v17
-; GFX11-NEXT: v_or_b32_e32 v2, v4, v18
-; GFX11-NEXT: v_or_b32_e32 v3, v19, v15
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v16
+; GFX11-NEXT: v_or_b32_e32 v1, v3, v11
+; GFX11-NEXT: v_or_b32_e32 v2, v4, v14
+; GFX11-NEXT: v_or_b32_e32 v3, v19, v12
+; GFX11-NEXT: v_or_b32_e32 v4, v20, v13
; GFX11-NEXT: s_clause 0x5
; GFX11-NEXT: scratch_store_b128 v0, v[28:31], off offset:32
; GFX11-NEXT: scratch_store_b128 v0, v[21:24], off offset:48
; GFX11-NEXT: scratch_store_b128 v0, v[34:37], off offset:64
-; GFX11-NEXT: scratch_store_b128 v0, v[11:14], off offset:80
+; GFX11-NEXT: scratch_store_b128 v0, v[15:18], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[7:10], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
; GFX11-NEXT: s_clause 0x12
@@ -54243,8 +54435,17 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32
@@ -54253,15 +54454,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:72
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:80
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:88
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
@@ -54271,113 +54472,92 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3
-; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5
-; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v3
+; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5
+; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v7
+; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v9
+; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v11
+; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v13
+; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v15
+; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v17
+; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v19
+; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v21
+; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v23
+; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v25
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v27
+; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v53
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v49
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192
@@ -54387,31 +54567,31 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256
@@ -54423,140 +54603,157 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:324
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:308
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9
-; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:124
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:300
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:292
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v3
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:284
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:276
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:268
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:260
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:252
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:244
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:236
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:228
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:220
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:204
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:196
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:188
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:164
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:156
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:148
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:140
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:124
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:116
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v2
-; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
-; SI-NEXT: v_or_b32_e32 v0, v0, v60
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
+; SI-NEXT: v_or_b32_e32 v0, v0, v16
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v30, v1
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_mov_b32_e32 v30, v5
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v4, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
+; SI-NEXT: v_or_b32_e32 v0, v0, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
+; SI-NEXT: v_or_b32_e32 v2, v2, v28
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_or_b32_e32 v3, v20, v3
+; SI-NEXT: v_or_b32_e32 v5, v2, v3
+; SI-NEXT: v_mov_b32_e32 v2, v9
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: s_or_b32 s4, s4, s5
@@ -54565,306 +54762,310 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_lshl_b32 s8, s27, 24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: v_or_b32_e32 v4, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT: v_or_b32_e32 v5, v2, v3
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
-; SI-NEXT: v_mov_b32_e32 v3, v7
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v12
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v57, v1
; SI-NEXT: v_or_b32_e32 v6, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v14
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v16
-; SI-NEXT: v_or_b32_e32 v0, v0, v15
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v24
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v7, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v20
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v26
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v17, v1
; SI-NEXT: v_or_b32_e32 v8, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v22
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v24
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v19
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_mov_b32_e32 v2, v9
+; SI-NEXT: v_or_b32_e32 v1, v15, v1
; SI-NEXT: v_or_b32_e32 v9, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v26
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v28
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v10, v1
+; SI-NEXT: v_or_b32_e32 v1, v27, v1
; SI-NEXT: v_or_b32_e32 v10, v0, v1
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v11
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v39
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v23
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v12, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v12, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v23
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v13
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v13, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v13, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v14, v1
-; SI-NEXT: v_or_b32_e32 v14, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v27
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v14, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v62
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v48
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v15, v1
-; SI-NEXT: v_or_b32_e32 v15, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
-; SI-NEXT: v_mov_b32_e32 v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v15, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v42, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v21
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v16, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v16, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v46, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v17, v1
-; SI-NEXT: v_or_b32_e32 v17, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v51
-; SI-NEXT: v_mov_b32_e32 v55, v22
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v51, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v17, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v22
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v18, v1
-; SI-NEXT: v_or_b32_e32 v18, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v50
-; SI-NEXT: v_mov_b32_e32 v44, v23
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v50, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v18, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v53, v3
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v29
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v63
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v19, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v19, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v40, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v30
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v20, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v20, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v34, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v51
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v21, v1
-; SI-NEXT: v_or_b32_e32 v21, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v59
-; SI-NEXT: v_mov_b32_e32 v59, v24
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v21, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v33
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v39
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v22, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v22, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v39, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v59
+; SI-NEXT: v_or_b32_e32 v0, v0, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v23, v1
+; SI-NEXT: v_or_b32_e32 v1, v39, v1
; SI-NEXT: v_or_b32_e32 v23, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v37, v56
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v33, v3
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v24, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v24, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v25, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v25, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v45
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v35, v39
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v63, v1
-; SI-NEXT: v_or_b32_e32 v26, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v26, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v63
+; SI-NEXT: v_mov_b32_e32 v41, v62
+; SI-NEXT: v_mov_b32_e32 v63, v56
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v46
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_or_b32_e32 v27, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v38
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v27, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v28, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v62, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v38, v3
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v61
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v29, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
-; SI-NEXT: v_or_b32_e32 v0, v0, v30
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v61, v54
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v30, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
-; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v57, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v31, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v40
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: v_mov_b32_e32 v38, v63
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -54891,61 +55092,64 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: s_and_b32 s6, s6, 0xffff
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: v_mov_b32_e32 v57, v1
+; SI-NEXT: v_mov_b32_e32 v48, v1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB39_3
; SI-NEXT: .LBB39_2:
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: v_mov_b32_e32 v37, v56
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_mov_b32_e32 v45, v33
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; SI-NEXT: .LBB39_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v63, v46
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v47, v44
; SI-NEXT: s_cbranch_vccnz .LBB39_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s28, s28, 3
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v0, s4, v0
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -54992,7 +55196,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55001,17 +55205,17 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55021,15 +55225,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55039,15 +55243,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55057,15 +55261,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55075,15 +55279,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55093,15 +55297,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55111,15 +55315,15 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55128,34 +55332,66 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v54, v1
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55164,16 +55400,16 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
+; SI-NEXT: v_or_b32_e32 v0, v42, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -55181,16 +55417,16 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -55198,33 +55434,33 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v43, v1
+; SI-NEXT: v_or_b32_e32 v1, v53, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55233,16 +55469,16 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -55250,16 +55486,16 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v34, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -55267,173 +55503,147 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v41, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v35, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v59, v1
+; SI-NEXT: v_or_b32_e32 v1, v33, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_or_b32_e32 v0, v42, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v45
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
-; SI-NEXT: v_or_b32_e32 v0, v32, v0
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v38, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36
-; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -55463,7 +55673,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v128i8_to_v32f32_scalar:
@@ -55485,21 +55695,21 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:332
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
@@ -55514,7 +55724,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
-; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -55523,76 +55733,80 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v23
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3
-; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5
-; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9
-; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11
-; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v25
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v27
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v3
+; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v5
+; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v29
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v45
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v44
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v43
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v42
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v41
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v40
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v55
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v54
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v53
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v52
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v51
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v50
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v49
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v48
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v39
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v30
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v31
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v32
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v33
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v34
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v35
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v36
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v37
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:184
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
@@ -55600,30 +55814,30 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37
-; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v38
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v15
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v1
; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v13
; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v9
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
@@ -55632,130 +55846,127 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v12, 8, v15
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1
+; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:312
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
-; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
-; VI-NEXT: s_waitcnt vmcnt(11)
-; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:292
+; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v11
+; VI-NEXT: s_waitcnt vmcnt(10)
+; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52
-; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116
-; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:188
; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:276
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308
-; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:116
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(12)
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v4, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -55764,208 +55975,207 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v3, v7
+; VI-NEXT: v_or_b32_sdwa v3, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v3, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v29, v9
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v50, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v59, v0
-; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v56, v0
-; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v61, v0
+; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v37, v1
+; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v38, v1
-; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v63, v1
+; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v59, v45
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v36, v0
-; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v35, v1
-; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v1
+; VI-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v33, v0
-; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v58, v0
+; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v51, v3
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v42, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v22, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v62, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v23, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v34, v26
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v49, v1
-; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v33, v1
+; VI-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v54, v0
-; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v46, v61
+; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v45, v32
; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v54, v0
+; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v57, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v47, v45
+; VI-NEXT: v_or_b32_sdwa v0, v41, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v43, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v48, v0
-; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v39, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v56, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v55, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v56, v60
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v1, v53, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v49, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v48, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v57, v0
-; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v48, v2
+; VI-NEXT: v_mov_b32_e32 v53, v55
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: v_mov_b32_e32 v43, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v42, v0
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -55996,52 +56206,49 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB39_3
; VI-NEXT: .LBB39_2:
-; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_mov_b32_e32 v46, v61
-; VI-NEXT: v_mov_b32_e32 v47, v45
-; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v34, v26
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: v_mov_b32_e32 v51, v7
-; VI-NEXT: v_mov_b32_e32 v48, v29
-; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
+; VI-NEXT: v_mov_b32_e32 v59, v45
+; VI-NEXT: v_mov_b32_e32 v45, v32
+; VI-NEXT: v_mov_b32_e32 v56, v60
; VI-NEXT: .LBB39_3: ; %Flow
; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v44, v47
-; VI-NEXT: v_mov_b32_e32 v47, v46
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_mov_b32_e32 v46, v49
+; VI-NEXT: v_mov_b32_e32 v32, v59
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_mov_b32_e32 v59, v33
; VI-NEXT: s_cbranch_vccnz .LBB39_5
; VI-NEXT: ; %bb.4: ; %cmp.true
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s5, s4
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_and_b32 s4, s4, 0xffff
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: s_add_i32 s16, s16, 3
@@ -56087,17 +56294,17 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
@@ -56110,327 +56317,332 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v46
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v45
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v57
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63
-; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v40
+; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v55
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v56
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
+; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: .LBB39_5: ; %end
; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -56470,28 +56682,37 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:64
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -56501,270 +56722,294 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX9-NEXT: s_waitcnt vmcnt(35)
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_waitcnt vmcnt(28)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v21
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v23
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v25
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v29
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v41
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v31
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v35
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v51
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248
+; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v7
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v5
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:324
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:292
+; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13
; GFX9-NEXT: s_waitcnt vmcnt(14)
-; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7
-; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX9-NEXT: s_waitcnt vmcnt(13)
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148
+; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:140
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:132
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:116
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:100
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148
-; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_waitcnt vmcnt(42)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(40)
+; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(36)
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(42)
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v38, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
-; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
@@ -56772,202 +57017,199 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_or_b32_sdwa v2, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v15, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v43, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v38
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v58, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v54, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_mov_b32_e32 v52, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v50, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v53, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v46, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v37, v57
-; GFX9-NEXT: v_mov_b32_e32 v57, v60
-; GFX9-NEXT: v_mov_b32_e32 v52, v56
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v46, v61
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_mov_b32_e32 v34, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v32, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v56, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v34, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v49, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v53, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v42, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v59, v39
+; GFX9-NEXT: v_mov_b32_e32 v39, v41
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_mov_b32_e32 v56, v55
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v61, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_or_b32 s4, s4, s5
@@ -56998,32 +57240,39 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB39_3
; GFX9-NEXT: .LBB39_2:
-; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v0
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
-; GFX9-NEXT: v_mov_b32_e32 v53, v3
-; GFX9-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-NEXT: v_mov_b32_e32 v57, v38
+; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: .LBB39_3: ; %Flow
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_mov_b32_e32 v41, v52
; GFX9-NEXT: s_cbranch_vccnz .LBB39_5
; GFX9-NEXT: ; %bb.4: ; %cmp.true
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v61
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -57067,190 +57316,210 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_and_b32 s8, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s9, s29, 8
; GFX9-NEXT: s_or_b32 s8, s9, s8
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v56
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s8, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s8, v0
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v52, v54
+; GFX9-NEXT: v_mov_b32_e32 v55, v57
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v49
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
@@ -57260,163 +57529,155 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v46
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v48
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
-; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v37
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v49
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v40
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v43
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 3, v36
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v41
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
-; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
+; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v54
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v0, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: .LBB39_5: ; %end
; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -57588,7 +57849,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -57659,24 +57920,24 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -57918,40 +58179,40 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -57960,9 +58221,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB39_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB39_3
; GFX11-TRUE16-NEXT: .LBB39_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -58376,7 +58636,9 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB39_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB39_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB39_2
+; GFX11-TRUE16-NEXT: s_branch .LBB39_3
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v32f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -58529,7 +58791,7 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -58600,24 +58862,24 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -58859,40 +59121,40 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -58901,9 +59163,8 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB39_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB39_3
; GFX11-FAKE16-NEXT: .LBB39_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -59317,7 +59578,9 @@ define inreg <32 x float> @bitcast_v128i8_to_v32f32_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB39_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB39_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB39_2
+; GFX11-FAKE16-NEXT: s_branch .LBB39_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -60263,8 +60526,9 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: v_writelane_b32 v63, s87, 31
; SI-NEXT: v_writelane_b32 v63, s96, 32
; SI-NEXT: v_writelane_b32 v63, s97, 33
-; SI-NEXT: v_writelane_b32 v63, s98, 34
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v63, s98, 34
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v63, s99, 35
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: v_readfirstlane_b32 s7, v2
@@ -60283,8 +60547,8 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: v_readfirstlane_b32 s44, v15
; SI-NEXT: v_readfirstlane_b32 s45, v16
; SI-NEXT: v_readfirstlane_b32 s46, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s47, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -60304,13 +60568,13 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s47, 0xffff0000
; SI-NEXT: v_writelane_b32 v62, s4, 3
-; SI-NEXT: s_lshl_b32 s4, s47, 16
-; SI-NEXT: v_writelane_b32 v62, s4, 2
; SI-NEXT: s_and_b32 s4, s46, 0xffff0000
-; SI-NEXT: v_writelane_b32 v62, s4, 1
+; SI-NEXT: v_writelane_b32 v62, s4, 2
; SI-NEXT: s_lshl_b32 s4, s46, 16
+; SI-NEXT: v_writelane_b32 v62, s4, 1
+; SI-NEXT: s_and_b32 s4, s45, 0xffff0000
+; SI-NEXT: s_lshl_b32 s60, s47, 16
; SI-NEXT: v_writelane_b32 v62, s4, 0
-; SI-NEXT: s_and_b32 s60, s45, 0xffff0000
; SI-NEXT: s_lshl_b32 s61, s45, 16
; SI-NEXT: s_and_b32 s62, s44, 0xffff0000
; SI-NEXT: s_lshl_b32 s63, s44, 16
@@ -60477,8 +60741,8 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: s_branch .LBB41_5
; SI-NEXT: .LBB41_3:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
+; SI-NEXT: ; implicit-def: $sgpr60
+; SI-NEXT: ; kill: killed $sgpr60
; SI-NEXT: ; implicit-def: $sgpr59
; SI-NEXT: ; implicit-def: $sgpr58
; SI-NEXT: ; implicit-def: $sgpr57
@@ -60539,24 +60803,26 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: ; implicit-def: $sgpr62
; SI-NEXT: ; implicit-def: $sgpr61
; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: ; kill: killed $sgpr60
+; SI-NEXT: ; implicit-def: $sgpr60
+; SI-NEXT: ; kill: killed $sgpr60
+; SI-NEXT: ; implicit-def: $sgpr60
+; SI-NEXT: ; kill: killed $sgpr60
+; SI-NEXT: ; implicit-def: $sgpr60
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
; SI-NEXT: .LBB41_4:
; SI-NEXT: v_readlane_b32 s4, v62, 0
-; SI-NEXT: v_mov_b32_e32 v4, s4
+; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: v_readlane_b32 s4, v62, 1
+; SI-NEXT: v_mov_b32_e32 v4, s4
+; SI-NEXT: v_readlane_b32 s4, v62, 2
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, s4
-; SI-NEXT: v_readlane_b32 s4, v62, 2
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v4, s4
+; SI-NEXT: v_mov_b32_e32 v4, s60
; SI-NEXT: v_readlane_b32 s4, v62, 3
; SI-NEXT: v_mov_b32_e32 v2, s59
; SI-NEXT: v_mov_b32_e32 v3, s58
@@ -60617,7 +60883,6 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; SI-NEXT: v_mov_b32_e32 v7, s63
; SI-NEXT: v_mov_b32_e32 v8, s62
; SI-NEXT: v_mov_b32_e32 v5, s61
-; SI-NEXT: v_mov_b32_e32 v6, s60
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v4, s4
@@ -60911,6 +61176,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -60931,7 +61197,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -60944,10 +61210,13 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_3
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -60980,16 +61249,15 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; VI-NEXT: v_add_f32_e32 v32, 1.0, v32
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
-; VI-NEXT: .LBB41_3: ; %end
+; VI-NEXT: .LBB41_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v32f32_to_v64bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -61010,7 +61278,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -61023,10 +61291,13 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_3
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -61059,44 +61330,42 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX9-NEXT: v_add_f32_e32 v32, 1.0, v32
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
-; GFX9-NEXT: .LBB41_3: ; %end
+; GFX9-NEXT: .LBB41_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v32f32_to_v64bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB41_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: .LBB41_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
@@ -61113,6 +61382,7 @@ define inreg <64 x bfloat> @bitcast_v32f32_to_v64bf16_scalar(<32 x float> inreg
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: .LBB41_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -64102,23 +64372,28 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v7
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6
-; SI-NEXT: v_mov_b32_e32 v39, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8
; SI-NEXT: v_mov_b32_e32 v38, v12
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v38
@@ -64132,14 +64407,11 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30
; SI-NEXT: v_mov_b32_e32 v37, v14
-; SI-NEXT: v_mov_b32_e32 v14, v11
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9
-; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; SI-NEXT: v_mul_f32_e32 v14, 1.0, v11
; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13
; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37
; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17
@@ -64158,7 +64430,9 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19
+; SI-NEXT: v_mul_f32_e64 v10, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23
+; SI-NEXT: v_mul_f32_e64 v11, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27
@@ -64167,8 +64441,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43
; SI-NEXT: v_mul_f32_e32 v52, 1.0, v44
; SI-NEXT: v_mul_f32_e32 v24, 1.0, v45
@@ -64184,77 +64458,76 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63
; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17
-; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18
-; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21
-; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20
-; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e64 v33, 1.0, s21
+; SI-NEXT: v_mul_f32_e64 v35, 1.0, s20
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16
-; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v59, v2
; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36
-; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33
+; SI-NEXT: v_alignbit_b32 v2, v2, v35, 16
+; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_alignbit_b32 v1, v1, v10, 16
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_alignbit_b32 v3, v3, v11, 16
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
@@ -64295,30 +64568,35 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
; SI-NEXT: v_mov_b32_e32 v35, v7
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_mov_b32_e32 v43, v8
; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v9
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_mov_b32_e32 v60, v9
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_mov_b32_e32 v58, v10
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v56, v11
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32
; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v8
+; SI-NEXT: v_mov_b32_e32 v42, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16
-; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v11
-; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v9
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; SI-NEXT: v_alignbit_b32 v9, v9, v10, 16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v56, v11
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
@@ -64332,7 +64610,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v44, v14
+; SI-NEXT: v_mov_b32_e32 v33, v14
; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -64353,25 +64631,25 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16
; SI-NEXT: s_cbranch_execnz .LBB43_3
; SI-NEXT: .LBB43_2: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59
; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60
+; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v42
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v57
@@ -64383,28 +64661,28 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36
; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
@@ -64415,8 +64693,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16
; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
@@ -64495,22 +64773,22 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
@@ -64518,7 +64796,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43
; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16
-; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42
+; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v60
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58
@@ -64533,7 +64811,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62
@@ -64565,7 +64843,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
; SI-NEXT: v_alignbit_b32 v22, v23, v22, 16
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23
@@ -64585,12 +64863,12 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48
; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30
@@ -64619,25 +64897,24 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v61, v53
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v59, v2
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
; SI-NEXT: v_mov_b32_e32 v45, v12
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_mov_b32_e32 v38, v39
; SI-NEXT: v_mov_b32_e32 v39, v41
@@ -64651,12 +64928,15 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mov_b32_e32 v48, v37
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v64bf16_to_v32f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -64677,7 +64957,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -64690,10 +64970,13 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_3
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v15
; VI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; VI-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -65270,16 +65553,15 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; VI-NEXT: v_cndmask_b32_e32 v16, v33, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; VI-NEXT: v_alignbit_b32 v16, v16, v18, 16
-; VI-NEXT: .LBB43_3: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v64bf16_to_v32f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -65300,7 +65582,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -65313,10 +65595,13 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_3
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v15
; GFX9-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -65926,11 +66211,9 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_and_b32_sdwa v16, v18, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v18, 16, v16
-; GFX9-NEXT: .LBB43_3: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_4:
-; GFX9-NEXT: s_branch .LBB43_2
;
; GFX11-LABEL: bitcast_v64bf16_to_v32f32_scalar:
; GFX11: ; %bb.0:
@@ -66020,8 +66303,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
@@ -66032,8 +66315,7 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s4, s27, 16
@@ -66780,8 +67062,8 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
@@ -66795,7 +67077,9 @@ define inreg <32 x float> @bitcast_v64bf16_to_v32f32_scalar(<64 x bfloat> inreg
; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB43_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -67774,6 +68058,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s47, v1
; SI-NEXT: v_readfirstlane_b32 s46, v2
; SI-NEXT: v_readfirstlane_b32 s45, v3
@@ -67788,11 +68073,11 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v12
; SI-NEXT: v_readfirstlane_b32 s11, v13
; SI-NEXT: v_readfirstlane_b32 s10, v14
-; SI-NEXT: v_readfirstlane_b32 s8, v15
-; SI-NEXT: v_readfirstlane_b32 s7, v16
-; SI-NEXT: v_readfirstlane_b32 s6, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v18
+; SI-NEXT: v_readfirstlane_b32 s9, v15
+; SI-NEXT: v_readfirstlane_b32 s8, v16
+; SI-NEXT: v_readfirstlane_b32 s7, v17
+; SI-NEXT: v_readfirstlane_b32 s6, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -67811,13 +68096,13 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v47, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v43, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v47, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v55, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v43, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v55, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v51, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v39, s4
@@ -67883,10 +68168,10 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v60, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v62, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v59, s10
; SI-NEXT: v_cvt_f32_f16_e32 v61, s11
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
@@ -67943,7 +68228,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v19, v21
; SI-NEXT: v_cvt_f32_f16_e32 v21, v3
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e64 v41, s6, 1.0
+; SI-NEXT: v_add_f32_e64 v41, s7, 1.0
; SI-NEXT: v_cvt_f32_f16_e32 v7, v41
; SI-NEXT: v_add_f32_e64 v6, s21, 1.0
; SI-NEXT: v_add_f32_e64 v10, s23, 1.0
@@ -67962,7 +68247,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v25
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v17
-; SI-NEXT: v_add_f32_e64 v53, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v53, s8, 1.0
; SI-NEXT: v_cvt_f32_f16_e32 v17, v25
; SI-NEXT: v_cvt_f32_f16_e32 v25, v33
; SI-NEXT: v_cvt_f32_f16_e32 v33, v38
@@ -67971,7 +68256,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v54, v42
; SI-NEXT: v_cvt_f32_f16_e32 v42, v46
; SI-NEXT: v_cvt_f32_f16_e32 v46, v57
-; SI-NEXT: v_add_f32_e64 v49, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v49, s9, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v53
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -67982,7 +68267,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v28, s43, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v49
; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v41
-; SI-NEXT: v_add_f32_e64 v45, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v45, s6, 1.0
; SI-NEXT: v_cvt_f32_f16_e32 v9, v49
; SI-NEXT: v_cvt_f32_f16_e32 v49, v18
; SI-NEXT: v_cvt_f32_f16_e32 v41, v10
@@ -68311,7 +68596,6 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: .LBB45_4:
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr18
@@ -68359,7 +68643,6 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr63
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; kill: killed $vgpr2
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr61
@@ -68376,12 +68659,17 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr47
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; kill: killed $vgpr2
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: ; implicit-def: $vgpr2
+; SI-NEXT: ; kill: killed $vgpr2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v32f32_to_v64f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -68402,7 +68690,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -68415,10 +68703,13 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_3
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -68451,16 +68742,15 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v32, 1.0, v32
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
-; VI-NEXT: .LBB45_3: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v32f32_to_v64f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -68481,7 +68771,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -68494,10 +68784,13 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_3
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -68530,44 +68823,42 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v32, 1.0, v32
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
-; GFX9-NEXT: .LBB45_3: ; %end
+; GFX9-NEXT: .LBB45_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v32f32_to_v64f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB45_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: .LBB45_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
@@ -68584,6 +68875,7 @@ define inreg <64 x half> @bitcast_v32f32_to_v64f16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: .LBB45_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -69636,22 +69928,23 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v53, v26
-; SI-NEXT: v_mov_b32_e32 v45, v6
+; SI-NEXT: v_mov_b32_e32 v52, v30
+; SI-NEXT: v_mov_b32_e32 v54, v26
+; SI-NEXT: v_mov_b32_e32 v41, v6
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt expcnt(0)
@@ -69661,12 +69954,12 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68
-; SI-NEXT: v_mov_b32_e32 v54, v14
+; SI-NEXT: v_mov_b32_e32 v53, v14
; SI-NEXT: v_mov_b32_e32 v55, v12
-; SI-NEXT: v_mov_b32_e32 v41, v11
+; SI-NEXT: v_mov_b32_e32 v43, v11
; SI-NEXT: v_mov_b32_e32 v40, v10
-; SI-NEXT: v_mov_b32_e32 v44, v9
-; SI-NEXT: v_mov_b32_e32 v43, v8
+; SI-NEXT: v_mov_b32_e32 v45, v9
+; SI-NEXT: v_mov_b32_e32 v44, v8
; SI-NEXT: v_cvt_f16_f32_e32 v9, v1
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cvt_f16_f32_e32 v11, v3
@@ -69674,27 +69967,27 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v12, v5
; SI-NEXT: v_cvt_f16_f32_e32 v14, v4
; SI-NEXT: v_cvt_f16_f32_e32 v58, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v41, v41
; SI-NEXT: v_cvt_f16_f32_e32 v56, v45
; SI-NEXT: v_cvt_f16_f32_e32 v46, v44
; SI-NEXT: v_cvt_f16_f32_e32 v44, v43
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v41
-; SI-NEXT: v_cvt_f16_f32_e32 v59, v40
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v40
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v54
+; SI-NEXT: v_cvt_f16_f32_e32 v59, v55
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v53
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v41, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v43, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v40, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v40, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v54
; SI-NEXT: v_cvt_f16_f32_e32 v21, v29
; SI-NEXT: v_cvt_f16_f32_e32 v22, v28
; SI-NEXT: v_cvt_f16_f32_e32 v0, s17
@@ -69706,26 +69999,26 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v51
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v50
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v48
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v52
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v50
; SI-NEXT: v_cvt_f16_f32_e32 v24, v38
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v48
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f16_f32_e32 v25, v39
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v30
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f16_f32_e32 v26, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f16_f32_e32 v39, v6
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f16_f32_e32 v27, v42
+; SI-NEXT: v_cvt_f16_f32_e32 v27, v31
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f16_f32_e32 v38, v60
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v42
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v37, v62
; SI-NEXT: s_waitcnt vmcnt(5)
@@ -69735,70 +70028,74 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f16_f32_e32 v30, v33
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v34
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v35
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v35
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT: v_cvt_f16_f32_e32 v63, s16
-; SI-NEXT: v_cvt_f16_f32_e32 v62, s18
-; SI-NEXT: v_cvt_f16_f32_e32 v60, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v42, s22
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
-; SI-NEXT: v_cvt_f16_f32_e32 v33, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v36
+; SI-NEXT: v_cvt_f16_f32_e32 v62, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v60, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v42, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v36, s22
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(6)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_or_b32_e32 v3, v36, v3
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_or_b32_e32 v20, v54, v20
+; SI-NEXT: v_mov_b32_e32 v54, v21
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v52
-; SI-NEXT: v_or_b32_e32 v5, v33, v5
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
; SI-NEXT: v_or_b32_e32 v22, v51, v22
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
@@ -69820,11 +70117,9 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v27, v38, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v7, v8, v7
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v28, v37, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
@@ -69832,70 +70127,68 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v4, v35, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: v_or_b32_e32 v9, v14, v9
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v58
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v61
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v44
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v47
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_or_b32_e32 v19, v54, v19
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v29, v31, v29
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v34
-; SI-NEXT: v_or_b32_e32 v0, v63, v0
-; SI-NEXT: v_or_b32_e32 v1, v62, v1
-; SI-NEXT: v_or_b32_e32 v2, v60, v2
-; SI-NEXT: v_or_b32_e32 v3, v42, v3
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_mov_b32_e32 v63, v44
-; SI-NEXT: v_or_b32_e32 v11, v44, v11
+; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v63
+; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: v_or_b32_e32 v1, v60, v1
+; SI-NEXT: v_or_b32_e32 v2, v42, v2
+; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v5, v32, v5
+; SI-NEXT: v_or_b32_e32 v10, v41, v10
+; SI-NEXT: v_or_b32_e32 v11, v46, v11
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
+; SI-NEXT: v_or_b32_e32 v12, v61, v12
; SI-NEXT: v_mov_b32_e32 v60, v59
-; SI-NEXT: v_or_b32_e32 v12, v59, v12
+; SI-NEXT: v_or_b32_e32 v13, v59, v13
; SI-NEXT: v_mov_b32_e32 v58, v57
-; SI-NEXT: v_or_b32_e32 v13, v57, v13
; SI-NEXT: v_mov_b32_e32 v56, v47
+; SI-NEXT: v_or_b32_e32 v14, v47, v14
; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_or_b32_e32 v14, v45, v14
+; SI-NEXT: v_or_b32_e32 v15, v45, v15
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v15, v43, v15
-; SI-NEXT: v_mov_b32_e32 v42, v41
-; SI-NEXT: v_or_b32_e32 v16, v41, v16
-; SI-NEXT: v_or_b32_e32 v17, v40, v17
+; SI-NEXT: v_or_b32_e32 v16, v43, v16
+; SI-NEXT: v_mov_b32_e32 v42, v53
+; SI-NEXT: v_or_b32_e32 v17, v53, v17
+; SI-NEXT: v_or_b32_e32 v18, v40, v18
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_or_b32_e32 v18, v55, v18
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v30, v32, v30
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_or_b32_e32 v31, v36, v31
+; SI-NEXT: v_or_b32_e32 v19, v55, v19
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_or_b32_e32 v30, v33, v30
+; SI-NEXT: v_or_b32_e32 v31, v35, v31
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB47_3
; SI-NEXT: .LBB47_2:
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v63, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
; SI-NEXT: v_mov_b32_e32 v60, v59
; SI-NEXT: v_mov_b32_e32 v58, v57
; SI-NEXT: v_mov_b32_e32 v56, v47
; SI-NEXT: v_mov_b32_e32 v46, v45
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v42, v41
+; SI-NEXT: v_mov_b32_e32 v42, v53
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_mov_b32_e32 v52, v51
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_mov_b32_e32 v54, v21
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_mov_b32_e32 v50, v24
; SI-NEXT: v_mov_b32_e32 v49, v25
@@ -69903,25 +70196,29 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v39, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
-; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB47_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v34, v33
-; SI-NEXT: v_mov_b32_e32 v33, v35
-; SI-NEXT: v_mov_b32_e32 v35, v40
+; SI-NEXT: v_mov_b32_e32 v33, v63
+; SI-NEXT: v_mov_b32_e32 v52, v36
+; SI-NEXT: v_mov_b32_e32 v36, v40
; SI-NEXT: v_mov_b32_e32 v53, v42
+; SI-NEXT: v_mov_b32_e32 v55, v44
; SI-NEXT: v_mov_b32_e32 v40, v46
-; SI-NEXT: v_mov_b32_e32 v41, v56
+; SI-NEXT: v_mov_b32_e32 v57, v56
; SI-NEXT: v_mov_b32_e32 v42, v58
; SI-NEXT: v_mov_b32_e32 v43, v60
+; SI-NEXT: v_mov_b32_e32 v44, v62
+; SI-NEXT: v_mov_b32_e32 v45, v41
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_cbranch_vccnz .LBB47_5
; SI-NEXT: ; %bb.4: ; %cmp.true
@@ -69930,11 +70227,11 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v57
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -69943,10 +70240,10 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v43
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v44
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v57
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -69955,33 +70252,32 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_mov_b32_e32 v55, v44
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v52
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v34
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v52
+; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v32
; SI-NEXT: v_cvt_f32_f16_e32 v26, v49
; SI-NEXT: v_cvt_f32_f16_e32 v29, v38
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v32, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v33, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v32, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v33, v35
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32
@@ -69990,14 +70286,14 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v33, v33
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
@@ -70032,26 +70328,22 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
@@ -70096,72 +70388,70 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v63
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v41
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v17, v17
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
@@ -70169,35 +70459,38 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v54
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v54
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v23
; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
@@ -70210,7 +70503,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v28, v26
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
@@ -70225,9 +70518,9 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_or_b32_e32 v28, v30, v28
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
@@ -70235,16 +70528,14 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v34
+; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
@@ -70272,6 +70563,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -70292,7 +70584,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -70305,10 +70597,13 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB47_4
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_3
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v18, 0x200
; VI-NEXT: v_add_f16_sdwa v33, v15, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -70406,16 +70701,15 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v16, 0x200, v16
; VI-NEXT: v_or_b32_e32 v17, v17, v33
; VI-NEXT: v_or_b32_e32 v16, v16, v18
-; VI-NEXT: .LBB47_3: ; %end
+; VI-NEXT: .LBB47_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_4:
-; VI-NEXT: s_branch .LBB47_2
;
; GFX9-LABEL: bitcast_v64f16_to_v32f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -70436,7 +70730,7 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -70449,10 +70743,13 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_3
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -70486,118 +70783,113 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v32, v32, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, v17, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, v16, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB47_3: ; %end
+; GFX9-NEXT: .LBB47_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_4:
-; GFX9-NEXT: s_branch .LBB47_2
;
; GFX11-LABEL: bitcast_v64f16_to_v32f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-NEXT: .LBB47_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v30, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v33, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v176, 0x200, v176 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v177, 0x200, v177 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v178, 0x200, v178 op_sel_hi:[0,1]
@@ -70606,119 +70898,117 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v181, 0x200, v181 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v182, 0x200, v182 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v183, 0x200, v183 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v168, 0x200, v168 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v169, 0x200, v169 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v170, 0x200, v170 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v171, 0x200, v171 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v172, 0x200, v172 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v173, 0x200, v173 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v175, 0x200, v175 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v184, 0x200, v184 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v151, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v137, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v124, 0x200, s23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v112, 0x200, s22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v101, 0x200, s21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v91, 0x200, s20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v82, 0x200, s19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v74, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v67, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v61, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v56, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v52, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v49, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v47, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v149, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v135, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v122, 0x200, s23 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v110, 0x200, s22 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v99, 0x200, s21 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v89, 0x200, s20 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v80, 0x200, s19 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v72, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v65, 0x200, s17 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v59, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v54, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v50, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: .LBB47_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -70726,23 +71016,25 @@ define inreg <32 x float> @bitcast_v64f16_to_v32f32_scalar(<64 x half> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB47_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB47_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-NEXT: s_branch .LBB47_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -71309,8 +71601,9 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
-; SI-NEXT: v_mov_b32_e32 v36, s16
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v36, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v35, s17
; SI-NEXT: v_mov_b32_e32 v33, s18
; SI-NEXT: v_mov_b32_e32 v32, s19
@@ -71699,12 +71992,15 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr39
; SI-NEXT: ; implicit-def: $vgpr39
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v32f32_to_v64i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -71725,7 +72021,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -71738,10 +72034,13 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB49_4
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_3
-; VI-NEXT: .LBB49_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB49_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB49_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -71774,16 +72073,15 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v32, 1.0, v32
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
-; VI-NEXT: .LBB49_3: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_4:
-; VI-NEXT: s_branch .LBB49_2
;
; GFX9-LABEL: bitcast_v32f32_to_v64i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -71804,7 +72102,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -71817,10 +72115,13 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_3
-; GFX9-NEXT: .LBB49_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB49_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -71853,44 +72154,42 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v32, 1.0, v32
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
-; GFX9-NEXT: .LBB49_3: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB49_4:
-; GFX9-NEXT: s_branch .LBB49_2
;
; GFX11-LABEL: bitcast_v32f32_to_v64i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB49_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB49_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: .LBB49_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
@@ -71907,6 +72206,7 @@ define inreg <64 x i16> @bitcast_v32f32_to_v64i16_scalar(<32 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -72753,43 +73053,43 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v56, v10
-; SI-NEXT: s_waitcnt expcnt(6)
-; SI-NEXT: v_mov_b32_e32 v57, v8
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v60, v8
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:68
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3
-; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v9
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17
-; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v19
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23
+; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v23
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
@@ -72797,7 +73097,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8
@@ -72806,102 +73106,103 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38
-; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v35
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32
-; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v32
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB51_4
+; SI-NEXT: s_cbranch_scc0 .LBB51_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v7, v0, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4
-; SI-NEXT: v_or_b32_e32 v9, v0, v50
+; SI-NEXT: v_or_b32_e32 v9, v0, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; SI-NEXT: v_or_b32_e32 v10, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_or_b32_e32 v10, v0, v50
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_or_b32_e32 v11, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
; SI-NEXT: v_or_b32_e32 v12, v0, v40
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
; SI-NEXT: v_or_b32_e32 v13, v0, v13
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v41, v14
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v60, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v43, v48
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
; SI-NEXT: v_mov_b32_e32 v48, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_or_b32_e32 v16, v0, v37
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_or_b32_e32 v16, v0, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20
; SI-NEXT: v_or_b32_e32 v17, v0, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v22
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_or_b32_e32 v18, v0, v35
+; SI-NEXT: v_or_b32_e32 v18, v0, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24
; SI-NEXT: v_or_b32_e32 v19, v0, v19
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26
-; SI-NEXT: v_mov_b32_e32 v37, v20
+; SI-NEXT: v_mov_b32_e32 v38, v20
; SI-NEXT: v_or_b32_e32 v20, v0, v33
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28
; SI-NEXT: v_or_b32_e32 v21, v0, v21
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30
; SI-NEXT: v_or_b32_e32 v22, v0, v31
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v39, v23
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v56, v23
; SI-NEXT: v_or_b32_e32 v23, v0, v23
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
; SI-NEXT: v_mov_b32_e32 v24, v29
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v24, v0, v24
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_or_b32_e32 v25, v0, v25
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
; SI-NEXT: v_mov_b32_e32 v26, v27
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
@@ -72918,29 +73219,28 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_lshl_b32 s9, s25, 16
; SI-NEXT: v_mov_b32_e32 v33, v28
; SI-NEXT: v_or_b32_e32 v28, v0, v5
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: v_or_b32_e32 v29, v0, v62
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_or_b32_e32 v29, v0, v63
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v30, v0, v3
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41
; SI-NEXT: s_or_b32 s10, s10, s11
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v32, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v34, v55
; SI-NEXT: v_or_b32_e32 v8, v1, v55
; SI-NEXT: v_mov_b32_e32 v55, v4
; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v47, v46
; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: v_or_b32_e32 v31, v0, v34
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_or_b32_e32 v31, v0, v62
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -72948,12 +73248,45 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: v_mov_b32_e32 v6, s10
-; SI-NEXT: s_cbranch_execnz .LBB51_3
-; SI-NEXT: .LBB51_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_branch .LBB51_3
+; SI-NEXT: .LBB51_2:
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v34, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v55, v4
+; SI-NEXT: v_mov_b32_e32 v53, v6
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v15
+; SI-NEXT: v_mov_b32_e32 v60, v14
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_mov_b32_e32 v45, v44
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v38, v20
+; SI-NEXT: v_mov_b32_e32 v56, v23
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v33, v28
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_mov_b32_e32 v46, v25
+; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; SI-NEXT: .LBB51_3: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v41, v42
+; SI-NEXT: s_cbranch_vccnz .LBB51_5
+; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT: v_or_b32_e32 v1, v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v34, v1
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -72999,7 +73332,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v38, v0
+; SI-NEXT: v_or_b32_e32 v0, v35, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -73007,25 +73340,25 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v36, v0
+; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v43, v0
; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v49, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v43, v0
+; SI-NEXT: v_or_b32_e32 v0, v49, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0
@@ -73036,7 +73369,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -73051,7 +73384,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -73079,31 +73412,31 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_or_b32_e32 v0, v56, v0
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -73117,7 +73450,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -73126,7 +73459,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -73141,7 +73474,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: .LBB51_3: ; %end
+; SI-NEXT: .LBB51_5: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
@@ -73160,40 +73493,12 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB51_4:
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v32, v55
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v55, v4
-; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
-; SI-NEXT: v_mov_b32_e32 v43, v48
-; SI-NEXT: v_mov_b32_e32 v48, v15
-; SI-NEXT: v_mov_b32_e32 v41, v14
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v37, v20
-; SI-NEXT: v_mov_b32_e32 v39, v23
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v33, v28
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB51_2
;
; VI-LABEL: bitcast_v64i16_to_v32f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s6, v2
; VI-NEXT: v_readfirstlane_b32 s7, v3
; VI-NEXT: v_readfirstlane_b32 s8, v4
@@ -73211,12 +73516,15 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s44, v16
; VI-NEXT: v_readfirstlane_b32 s45, v17
; VI-NEXT: v_readfirstlane_b32 s46, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s47, v1
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_3
-; VI-NEXT: .LBB51_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB51_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB51_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s47, 3
; VI-NEXT: s_and_b32 s4, s47, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -73377,7 +73685,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s6, s4, 0x30000
-; VI-NEXT: .LBB51_3: ; %end
+; VI-NEXT: .LBB51_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -73411,13 +73719,12 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s44
; VI-NEXT: v_mov_b32_e32 v31, s45
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v64i16_to_v32f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -73438,7 +73745,7 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -73451,10 +73758,13 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_3
-; GFX9-NEXT: .LBB51_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB51_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -73487,118 +73797,113 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB51_3: ; %end
+; GFX9-NEXT: .LBB51_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_4:
-; GFX9-NEXT: s_branch .LBB51_2
;
; GFX11-LABEL: bitcast_v64i16_to_v32f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v30, s27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v33, s27, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v17, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v176, v176, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v177, v177, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v178, v178, 3 op_sel_hi:[1,0]
@@ -73607,119 +73912,117 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v181, v181, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v182, v182, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v183, v183, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v168, v168, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v169, v169, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v170, v170, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v171, v171, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v172, v172, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v173, v173, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v175, v175, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v184, v184, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v151, s25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v137, s24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v124, s23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v112, s22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v101, s21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v91, s20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v82, s19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v74, s18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v67, s17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v61, s16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v56, s3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v52, s2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v49, s1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v47, s0, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v149, s25, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v135, s24, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v122, s23, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v110, s22, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v99, s21, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v89, s20, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v80, s19, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v72, s18, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v65, s17, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v59, s16, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v54, s3, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v50, s2, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v2, s1, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: .LBB51_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -73727,23 +74030,25 @@ define inreg <32 x float> @bitcast_v64i16_to_v32f32_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB51_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -73993,6 +74298,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -74013,7 +74319,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
@@ -74026,10 +74332,13 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB53_3
-; SI-NEXT: .LBB53_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB53_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB53_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -74062,16 +74371,15 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v30
; SI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
-; SI-NEXT: .LBB53_3: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v16i64_to_v16f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -74092,7 +74400,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -74105,10 +74413,13 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -74141,16 +74452,15 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
; VI-NEXT: v_add_u32_e32 v30, vcc, 3, v30
; VI-NEXT: v_addc_u32_e32 v31, vcc, 0, v31, vcc
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v16i64_to_v16f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -74171,7 +74481,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -74184,10 +74494,13 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_3
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -74220,44 +74533,42 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, 0, v29, vcc
; GFX9-NEXT: v_add_co_u32_e32 v30, vcc, 3, v30
; GFX9-NEXT: v_addc_co_u32_e32 v31, vcc, 0, v31, vcc
-; GFX9-NEXT: .LBB53_3: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v16i64_to_v16f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB53_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: .LBB53_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -74298,6 +74609,7 @@ define inreg <16 x double> @bitcast_v16i64_to_v16f64_scalar(<16 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
; GFX11-NEXT: v_add_co_u32 v30, vcc_lo, v30, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v31, null, 0, v31, vcc_lo
+; GFX11-NEXT: .LBB53_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -74476,6 +74788,7 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v17
; SI-NEXT: v_mov_b32_e32 v30, v16
; SI-NEXT: v_mov_b32_e32 v29, v15
@@ -74506,13 +74819,16 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB55_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_3
-; SI-NEXT: .LBB55_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB55_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB55_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -74529,17 +74845,16 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; SI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; SI-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
-; SI-NEXT: .LBB55_3: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: v_mov_b32_e32 v18, v32
; SI-NEXT: v_mov_b32_e32 v19, v33
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB55_4:
-; SI-NEXT: s_branch .LBB55_2
;
; VI-LABEL: bitcast_v16f64_to_v16i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -74570,13 +74885,16 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB55_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_3
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -74593,17 +74911,16 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; VI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; VI-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
-; VI-NEXT: .LBB55_3: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: v_mov_b32_e32 v19, v33
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_4:
-; VI-NEXT: s_branch .LBB55_2
;
; GFX9-LABEL: bitcast_v16f64_to_v16i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -74634,13 +74951,16 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_3
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -74657,45 +74977,43 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
-; GFX9-NEXT: .LBB55_3: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: v_mov_b32_e32 v19, v33
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_4:
-; GFX9-NEXT: s_branch .LBB55_2
;
; GFX11-LABEL: bitcast_v16f64_to_v16i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB55_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: .LBB55_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -74712,6 +75030,7 @@ define inreg <16 x i64> @bitcast_v16f64_to_v16i64_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX11-NEXT: v_add_f64 v[30:31], v[30:31], 1.0
+; GFX11-NEXT: .LBB55_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -79237,8 +79556,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_writelane_b32 v41, s66, 18
; SI-NEXT: v_writelane_b32 v41, s67, 19
; SI-NEXT: v_writelane_b32 v41, s68, 20
-; SI-NEXT: v_writelane_b32 v41, s69, 21
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v41, s69, 21
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v41, s70, 22
; SI-NEXT: v_readfirstlane_b32 s47, v1
; SI-NEXT: v_readfirstlane_b32 s46, v2
@@ -79257,8 +79577,8 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v15
; SI-NEXT: v_readfirstlane_b32 s8, v16
; SI-NEXT: v_readfirstlane_b32 s7, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_writelane_b32 v41, s71, 23
; SI-NEXT: s_cbranch_scc0 .LBB57_4
@@ -79304,9 +79624,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24
; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16
; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8
-; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24
-; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16
-; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8
+; SI-NEXT: v_alignbit_b32 v31, s46, v22, 24
+; SI-NEXT: v_alignbit_b32 v32, s46, v22, 16
+; SI-NEXT: v_alignbit_b32 v30, s46, v22, 8
; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24
; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16
; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8
@@ -79451,9 +79771,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_alignbit_b32 v19, s42, v21, 24
; SI-NEXT: v_alignbit_b32 v20, s42, v21, 16
; SI-NEXT: v_alignbit_b32 v21, s42, v21, 8
-; SI-NEXT: v_alignbit_b32 v30, s46, v22, 24
-; SI-NEXT: v_alignbit_b32 v31, s46, v22, 16
-; SI-NEXT: v_alignbit_b32 v32, s46, v22, 8
+; SI-NEXT: v_alignbit_b32 v31, s46, v22, 24
+; SI-NEXT: v_alignbit_b32 v32, s46, v22, 16
+; SI-NEXT: v_alignbit_b32 v30, s46, v22, 8
; SI-NEXT: v_alignbit_b32 v36, s29, v23, 24
; SI-NEXT: v_alignbit_b32 v22, s29, v23, 16
; SI-NEXT: v_alignbit_b32 v23, s29, v23, 8
@@ -79693,16 +80013,16 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v23, s4
; SI-NEXT: buffer_store_dword v23, v22, s[0:3], 0 offen
; SI-NEXT: s_and_b32 s4, s47, 0xff
-; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v32
+; SI-NEXT: v_lshlrev_b32_e32 v22, 8, v30
; SI-NEXT: v_or_b32_e32 v22, s4, v22
; SI-NEXT: s_and_b32 s4, s46, 0xff
; SI-NEXT: s_lshl_b32 s5, s34, 8
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v23, 0xff, v31
+; SI-NEXT: v_and_b32_e32 v23, 0xff, v32
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s31, 0xff
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v30
+; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v31
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_lshl_b32 s16, s30, 24
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
@@ -80003,18 +80323,18 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr72
; SI-NEXT: ; implicit-def: $sgpr63
; SI-NEXT: ; implicit-def: $sgpr62
-; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $sgpr59
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $sgpr58
-; SI-NEXT: ; implicit-def: $vgpr26
+; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $sgpr57
-; SI-NEXT: ; implicit-def: $vgpr25
+; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; implicit-def: $sgpr56
+; SI-NEXT: ; implicit-def: $vgpr26
+; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr24
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr20
@@ -80037,7 +80357,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v16i64_to_v128i8_scalar:
; VI: ; %bb.0:
@@ -80095,8 +80417,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s6, v15
; VI-NEXT: v_readfirstlane_b32 s7, v16
; VI-NEXT: v_readfirstlane_b32 s4, v17
-; VI-NEXT: s_and_b64 s[46:47], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v18
+; VI-NEXT: s_and_b64 s[46:47], vcc, exec
+; VI-NEXT: s_mov_b64 vcc, -1
; VI-NEXT: v_writelane_b32 v20, s87, 31
; VI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane
; VI-NEXT: s_cbranch_scc0 .LBB57_4
@@ -80932,8 +81255,6 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: .LBB57_4:
; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: ; kill: killed $sgpr60
-; VI-NEXT: ; implicit-def: $sgpr60
-; VI-NEXT: ; kill: killed $sgpr60
; VI-NEXT: ; implicit-def: $sgpr65
; VI-NEXT: ; implicit-def: $sgpr64
; VI-NEXT: ; implicit-def: $sgpr55
@@ -81075,6 +81396,8 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: ; kill: killed $sgpr60
; VI-NEXT: ; implicit-def: $sgpr60
+; VI-NEXT: ; kill: killed $sgpr60
+; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: v_writelane_b32 v21, s60, 0
; VI-NEXT: v_writelane_b32 v21, s61, 1
; VI-NEXT: ; implicit-def: $sgpr60
@@ -81086,7 +81409,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr60
; VI-NEXT: v_writelane_b32 v21, s60, 6
; VI-NEXT: v_writelane_b32 v21, s61, 7
-; VI-NEXT: s_branch .LBB57_2
+; VI-NEXT: s_andn2_b64 vcc, exec, vcc
+; VI-NEXT: s_cbranch_vccz .LBB57_2
+; VI-NEXT: s_branch .LBB57_3
;
; GFX9-LABEL: bitcast_v16i64_to_v128i8_scalar:
; GFX9: ; %bb.0:
@@ -81148,8 +81473,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s6, v15
; GFX9-NEXT: v_readfirstlane_b32 s7, v16
; GFX9-NEXT: v_readfirstlane_b32 s4, v17
-; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v18
+; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
+; GFX9-NEXT: s_mov_b64 vcc, -1
; GFX9-NEXT: v_writelane_b32 v20, s99, 35
; GFX9-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane
; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
@@ -81254,9 +81580,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_writelane_b32 v21, s46, 50
; GFX9-NEXT: s_lshr_b64 s[56:57], s[4:5], 24
; GFX9-NEXT: v_writelane_b32 v21, s56, 0
-; GFX9-NEXT: s_lshr_b32 s82, s28, 8
-; GFX9-NEXT: s_lshr_b32 s83, s27, 24
-; GFX9-NEXT: s_lshr_b32 s81, s27, 16
+; GFX9-NEXT: s_lshr_b32 s81, s28, 8
+; GFX9-NEXT: s_lshr_b32 s82, s27, 24
+; GFX9-NEXT: s_lshr_b32 s83, s27, 16
; GFX9-NEXT: s_lshr_b32 s84, s27, 8
; GFX9-NEXT: s_lshr_b32 s85, s26, 16
; GFX9-NEXT: s_lshr_b32 s86, s26, 8
@@ -81435,9 +81761,9 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_writelane_b32 v21, s46, 50
; GFX9-NEXT: s_lshr_b64 s[56:57], s[4:5], 24
; GFX9-NEXT: v_writelane_b32 v21, s56, 0
-; GFX9-NEXT: s_lshr_b32 s82, s28, 8
-; GFX9-NEXT: s_lshr_b32 s83, s27, 24
-; GFX9-NEXT: s_lshr_b32 s81, s27, 16
+; GFX9-NEXT: s_lshr_b32 s81, s28, 8
+; GFX9-NEXT: s_lshr_b32 s82, s27, 24
+; GFX9-NEXT: s_lshr_b32 s83, s27, 16
; GFX9-NEXT: s_lshr_b32 s84, s27, 8
; GFX9-NEXT: s_lshr_b32 s85, s26, 16
; GFX9-NEXT: s_lshr_b32 s86, s26, 8
@@ -81596,14 +81922,14 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: s_and_b32 s16, s27, 0xff
; GFX9-NEXT: s_lshl_b32 s17, s84, 8
; GFX9-NEXT: s_or_b32 s16, s16, s17
-; GFX9-NEXT: s_and_b32 s17, s81, 0xff
-; GFX9-NEXT: s_lshl_b32 s18, s83, 8
+; GFX9-NEXT: s_and_b32 s17, s83, 0xff
+; GFX9-NEXT: s_lshl_b32 s18, s82, 8
; GFX9-NEXT: s_or_b32 s17, s17, s18
; GFX9-NEXT: s_and_b32 s16, s16, 0xffff
; GFX9-NEXT: s_lshl_b32 s17, s17, 16
; GFX9-NEXT: s_or_b32 s16, s16, s17
; GFX9-NEXT: v_mov_b32_e32 v12, s16
-; GFX9-NEXT: s_lshl_b32 s16, s82, 8
+; GFX9-NEXT: s_lshl_b32 s16, s81, 8
; GFX9-NEXT: s_and_b32 s17, s28, 0xff
; GFX9-NEXT: v_readlane_b32 s18, v21, 50
; GFX9-NEXT: s_or_b32 s16, s17, s16
@@ -81931,13 +82257,7 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: v_writelane_b32 v21, s82, 0
-; GFX9-NEXT: ; implicit-def: $sgpr47
-; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr80
; GFX9-NEXT: ; implicit-def: $sgpr71
@@ -81966,6 +82286,8 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr86
; GFX9-NEXT: ; implicit-def: $sgpr85
; GFX9-NEXT: ; implicit-def: $sgpr84
+; GFX9-NEXT: ; implicit-def: $sgpr83
+; GFX9-NEXT: ; implicit-def: $sgpr82
; GFX9-NEXT: ; implicit-def: $sgpr81
; GFX9-NEXT: ; implicit-def: $sgpr36
; GFX9-NEXT: ; implicit-def: $sgpr34
@@ -81981,15 +82303,15 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr62
; GFX9-NEXT: ; implicit-def: $sgpr60
; GFX9-NEXT: ; implicit-def: $sgpr58
-; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: v_writelane_b32 v21, s83, 1
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: ; implicit-def: $sgpr82
+; GFX9-NEXT: v_writelane_b32 v21, s56, 0
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: v_writelane_b32 v21, s57, 1
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr47
@@ -82076,7 +82398,13 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: ; kill: killed $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; kill: killed $sgpr47
-; GFX9-NEXT: s_branch .LBB57_2
+; GFX9-NEXT: ; implicit-def: $sgpr47
+; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: ; implicit-def: $sgpr47
+; GFX9-NEXT: ; kill: killed $sgpr47
+; GFX9-NEXT: s_andn2_b64 vcc, exec, vcc
+; GFX9-NEXT: s_cbranch_vccz .LBB57_2
+; GFX9-NEXT: s_branch .LBB57_3
;
; GFX11-LABEL: bitcast_v16i64_to_v128i8_scalar:
; GFX11: ; %bb.0:
@@ -82115,8 +82443,8 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s5, v14
; GFX11-NEXT: v_writelane_b32 v16, s37, 5
; GFX11-NEXT: v_writelane_b32 v17, s101, 5
-; GFX11-NEXT: s_mov_b32 s101, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 vcc_lo, -1
; GFX11-NEXT: ; implicit-def: $vgpr19 : SGPR spill to VGPR lane
; GFX11-NEXT: ; implicit-def: $vgpr18 : SGPR spill to VGPR lane
; GFX11-NEXT: v_writelane_b32 v16, s38, 6
@@ -82148,297 +82476,156 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v16, s85, 29
; GFX11-NEXT: v_writelane_b32 v16, s86, 30
; GFX11-NEXT: v_writelane_b32 v16, s87, 31
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s43, s25, 8
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[4:5], 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 16
-; GFX11-NEXT: s_lshr_b32 s43, s24, 16
-; GFX11-NEXT: s_lshr_b32 s104, s5, 24
-; GFX11-NEXT: s_lshr_b32 s102, s5, 16
-; GFX11-NEXT: s_lshr_b32 s103, s5, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 17
-; GFX11-NEXT: s_lshr_b32 s43, s24, 8
-; GFX11-NEXT: s_lshr_b32 s57, s4, 16
-; GFX11-NEXT: s_lshr_b32 s47, s4, 8
-; GFX11-NEXT: s_lshr_b32 s46, s7, 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 18
-; GFX11-NEXT: s_lshr_b32 s43, s23, 24
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s7, 16
-; GFX11-NEXT: s_lshr_b32 s34, s7, 8
-; GFX11-NEXT: s_lshr_b32 s69, s6, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 19
-; GFX11-NEXT: s_lshr_b32 s43, s23, 16
-; GFX11-NEXT: s_lshr_b32 s56, s6, 8
-; GFX11-NEXT: s_lshr_b32 s35, s9, 24
-; GFX11-NEXT: s_lshr_b32 s36, s9, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 20
-; GFX11-NEXT: s_lshr_b32 s43, s23, 8
-; GFX11-NEXT: s_lshr_b32 s37, s9, 8
-; GFX11-NEXT: s_lshr_b32 s38, s8, 16
-; GFX11-NEXT: s_lshr_b32 s39, s8, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 21
-; GFX11-NEXT: s_lshr_b32 s43, s22, 16
-; GFX11-NEXT: s_lshr_b32 s48, s11, 24
-; GFX11-NEXT: s_lshr_b32 s49, s11, 16
-; GFX11-NEXT: s_lshr_b32 s50, s11, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 22
-; GFX11-NEXT: s_lshr_b32 s43, s22, 8
-; GFX11-NEXT: s_lshr_b32 s51, s10, 16
-; GFX11-NEXT: s_lshr_b32 s52, s10, 8
-; GFX11-NEXT: s_lshr_b32 s53, s13, 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 23
-; GFX11-NEXT: s_lshr_b32 s43, s21, 24
-; GFX11-NEXT: s_lshr_b32 s54, s13, 16
-; GFX11-NEXT: s_lshr_b32 s55, s13, 8
-; GFX11-NEXT: s_lshr_b32 s64, s12, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 24
-; GFX11-NEXT: s_lshr_b32 s43, s21, 16
-; GFX11-NEXT: s_lshr_b32 s65, s12, 8
-; GFX11-NEXT: s_lshr_b32 s66, s15, 24
-; GFX11-NEXT: s_lshr_b32 s67, s15, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 25
-; GFX11-NEXT: s_lshr_b32 s43, s21, 8
-; GFX11-NEXT: s_lshr_b32 s68, s15, 8
-; GFX11-NEXT: s_lshr_b32 s59, s14, 16
-; GFX11-NEXT: s_lshr_b32 s58, s14, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 26
-; GFX11-NEXT: s_lshr_b32 s43, s20, 16
-; GFX11-NEXT: s_lshr_b32 s70, s41, 24
-; GFX11-NEXT: s_lshr_b32 s71, s41, 16
-; GFX11-NEXT: s_lshr_b32 s60, s41, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 27
-; GFX11-NEXT: s_lshr_b32 s43, s20, 8
-; GFX11-NEXT: s_lshr_b32 s80, s40, 16
-; GFX11-NEXT: s_lshr_b32 s61, s40, 8
-; GFX11-NEXT: s_lshr_b32 s81, s29, 24
-; GFX11-NEXT: v_writelane_b32 v19, s43, 28
-; GFX11-NEXT: s_lshr_b32 s43, s19, 24
-; GFX11-NEXT: s_lshr_b32 s82, s29, 16
-; GFX11-NEXT: s_lshr_b32 s83, s29, 8
-; GFX11-NEXT: s_lshr_b32 s84, s28, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 29
-; GFX11-NEXT: s_lshr_b32 s43, s19, 16
-; GFX11-NEXT: s_lshr_b32 s85, s28, 8
-; GFX11-NEXT: s_lshr_b32 s86, s27, 24
-; GFX11-NEXT: s_lshr_b32 s72, s27, 16
-; GFX11-NEXT: v_writelane_b32 v19, s43, 30
-; GFX11-NEXT: s_lshr_b32 s43, s19, 8
-; GFX11-NEXT: s_lshr_b32 s87, s27, 8
-; GFX11-NEXT: s_lshr_b32 s73, s26, 16
-; GFX11-NEXT: s_lshr_b32 s96, s26, 8
-; GFX11-NEXT: v_writelane_b32 v19, s43, 31
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s97, s25, 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 0
-; GFX11-NEXT: s_lshr_b32 s43, s18, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 14
-; GFX11-NEXT: s_lshr_b32 s42, s25, 16
-; GFX11-NEXT: s_lshr_b32 s74, s2, 16
-; GFX11-NEXT: v_writelane_b32 v18, s43, 1
-; GFX11-NEXT: s_lshr_b32 s43, s17, 24
-; GFX11-NEXT: v_writelane_b32 v19, s63, 15
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[6:7], 24
-; GFX11-NEXT: s_lshr_b32 s98, s1, 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 2
-; GFX11-NEXT: s_lshr_b32 s43, s17, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 12
-; GFX11-NEXT: s_lshr_b32 s99, s1, 16
-; GFX11-NEXT: s_lshr_b32 s100, s1, 8
-; GFX11-NEXT: v_writelane_b32 v18, s43, 3
-; GFX11-NEXT: s_lshr_b32 s43, s17, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 13
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[8:9], 24
-; GFX11-NEXT: s_lshr_b32 s44, s0, 16
-; GFX11-NEXT: v_writelane_b32 v18, s43, 4
-; GFX11-NEXT: s_lshr_b32 s43, s16, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 10
-; GFX11-NEXT: s_lshr_b32 s45, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[26:27], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 5
-; GFX11-NEXT: s_lshr_b32 s43, s16, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 11
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[10:11], 24
+; GFX11-NEXT: s_lshr_b32 s42, s5, 24
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 0
+; GFX11-NEXT: s_lshr_b32 s42, s5, 16
+; GFX11-NEXT: s_lshr_b32 s34, s27, 16
+; GFX11-NEXT: s_lshr_b32 s35, s27, 8
+; GFX11-NEXT: s_lshr_b32 s36, s26, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 1
+; GFX11-NEXT: s_lshr_b32 s42, s5, 8
+; GFX11-NEXT: s_lshr_b32 s37, s26, 8
+; GFX11-NEXT: s_lshr_b32 s38, s25, 24
+; GFX11-NEXT: s_lshr_b32 s39, s25, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 2
+; GFX11-NEXT: s_lshr_b32 s42, s4, 16
+; GFX11-NEXT: s_lshr_b32 s48, s25, 8
+; GFX11-NEXT: s_lshr_b32 s49, s24, 16
+; GFX11-NEXT: s_lshr_b32 s50, s24, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s4, 8
+; GFX11-NEXT: s_lshr_b32 s51, s23, 24
+; GFX11-NEXT: s_lshr_b32 s52, s23, 16
+; GFX11-NEXT: s_lshr_b32 s53, s23, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 4
+; GFX11-NEXT: s_lshr_b32 s42, s7, 24
+; GFX11-NEXT: s_lshr_b32 s54, s22, 16
+; GFX11-NEXT: s_lshr_b32 s55, s22, 8
+; GFX11-NEXT: s_lshr_b32 s64, s21, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s7, 16
+; GFX11-NEXT: s_lshr_b32 s65, s21, 16
+; GFX11-NEXT: s_lshr_b32 s66, s21, 8
+; GFX11-NEXT: s_lshr_b32 s67, s20, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 6
+; GFX11-NEXT: s_lshr_b32 s42, s7, 8
+; GFX11-NEXT: s_lshr_b32 s68, s20, 8
+; GFX11-NEXT: s_lshr_b32 s69, s19, 24
+; GFX11-NEXT: s_lshr_b32 s70, s19, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 7
+; GFX11-NEXT: s_lshr_b32 s42, s6, 16
+; GFX11-NEXT: s_lshr_b32 s71, s19, 8
+; GFX11-NEXT: s_lshr_b32 s80, s18, 16
+; GFX11-NEXT: s_lshr_b32 s81, s18, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 8
+; GFX11-NEXT: s_lshr_b32 s42, s6, 8
+; GFX11-NEXT: s_lshr_b32 s82, s17, 24
+; GFX11-NEXT: s_lshr_b32 s83, s17, 16
+; GFX11-NEXT: s_lshr_b32 s84, s17, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 9
+; GFX11-NEXT: s_lshr_b32 s42, s9, 24
+; GFX11-NEXT: s_lshr_b32 s85, s16, 16
+; GFX11-NEXT: s_lshr_b32 s86, s16, 8
+; GFX11-NEXT: s_lshr_b32 s87, s3, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 10
+; GFX11-NEXT: s_lshr_b32 s42, s9, 16
+; GFX11-NEXT: s_lshr_b32 s96, s3, 16
+; GFX11-NEXT: s_lshr_b32 s97, s3, 8
+; GFX11-NEXT: s_lshr_b32 s98, s2, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 11
+; GFX11-NEXT: s_lshr_b32 s42, s9, 8
+; GFX11-NEXT: s_lshr_b32 s99, s2, 8
+; GFX11-NEXT: s_lshr_b32 s100, s1, 24
+; GFX11-NEXT: s_lshr_b32 s101, s1, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 12
+; GFX11-NEXT: s_lshr_b32 s42, s8, 16
+; GFX11-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s8, 8
+; GFX11-NEXT: s_lshr_b64 s[44:45], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[46:47], s[8:9], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 14
+; GFX11-NEXT: s_lshr_b32 s42, s11, 24
+; GFX11-NEXT: s_lshr_b64 s[56:57], s[10:11], 24
+; GFX11-NEXT: s_lshr_b64 s[58:59], s[12:13], 24
+; GFX11-NEXT: s_lshr_b64 s[60:61], s[14:15], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s11, 16
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
+; GFX11-NEXT: s_lshr_b64 s[72:73], s[28:29], 24
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[26:27], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 16
+; GFX11-NEXT: s_lshr_b32 s42, s11, 8
; GFX11-NEXT: s_lshr_b64 s[88:89], s[24:25], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 6
-; GFX11-NEXT: s_lshr_b32 s43, s3, 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 8
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[20:21], 24
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[22:23], 24
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[20:21], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 17
+; GFX11-NEXT: s_lshr_b32 s42, s10, 16
; GFX11-NEXT: s_lshr_b64 s[90:91], s[18:19], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 7
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: v_writelane_b32 v19, s63, 9
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[12:13], 24
; GFX11-NEXT: s_lshr_b64 s[92:93], s[16:17], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 8
-; GFX11-NEXT: s_lshr_b32 s43, s3, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 6
; GFX11-NEXT: s_lshr_b64 s[94:95], s[2:3], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 18
+; GFX11-NEXT: s_lshr_b32 s42, s10, 8
; GFX11-NEXT: s_lshr_b64 s[30:31], s[0:1], 24
-; GFX11-NEXT: v_writelane_b32 v18, s43, 9
-; GFX11-NEXT: s_lshr_b32 s43, s2, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 7
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[14:15], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v19, s62, 4
-; GFX11-NEXT: v_writelane_b32 v19, s63, 5
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 2
-; GFX11-NEXT: v_writelane_b32 v19, s63, 3
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[28:29], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v19, s62, 0
-; GFX11-NEXT: v_writelane_b32 v19, s63, 1
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[22:23], 24
-; GFX11-NEXT: s_branch .LBB57_3
-; GFX11-NEXT: .LBB57_2:
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: s_mov_b32 s101, -1
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 0
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 1
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 2
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 3
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 4
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 5
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr72
-; GFX11-NEXT: ; implicit-def: $sgpr86
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr83
-; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr71
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr59
-; GFX11-NEXT: ; implicit-def: $sgpr68
-; GFX11-NEXT: ; implicit-def: $sgpr67
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr55
-; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr53
-; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr57
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 6
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 7
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 8
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 9
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 10
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 11
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 12
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 13
-; GFX11-NEXT: ; implicit-def: $vcc_lo
-; GFX11-NEXT: v_writelane_b32 v19, vcc_lo, 14
-; GFX11-NEXT: v_writelane_b32 v19, vcc_hi, 15
-; GFX11-NEXT: .LBB57_3: ; %Flow
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s101
-; GFX11-NEXT: s_mov_b32 s101, s104
-; GFX11-NEXT: s_mov_b32 s104, s57
-; GFX11-NEXT: s_mov_b32 s57, s69
-; GFX11-NEXT: s_mov_b32 s69, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_5
-; GFX11-NEXT: ; %bb.4: ; %cmp.true
+; GFX11-NEXT: v_writelane_b32 v19, s42, 19
+; GFX11-NEXT: s_lshr_b32 s42, s13, 24
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 20
+; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 21
+; GFX11-NEXT: s_lshr_b32 s42, s13, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 22
+; GFX11-NEXT: s_lshr_b32 s42, s12, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 23
+; GFX11-NEXT: s_lshr_b32 s42, s12, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 24
+; GFX11-NEXT: s_lshr_b32 s42, s15, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 25
+; GFX11-NEXT: s_lshr_b32 s42, s15, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 26
+; GFX11-NEXT: s_lshr_b32 s42, s15, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 27
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 28
+; GFX11-NEXT: s_lshr_b32 s42, s14, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 29
+; GFX11-NEXT: s_lshr_b32 s42, s41, 24
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v19, s42, 30
+; GFX11-NEXT: s_lshr_b32 s42, s41, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 31
+; GFX11-NEXT: s_lshr_b32 s42, s41, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 0
+; GFX11-NEXT: s_lshr_b32 s42, s40, 16
+; GFX11-NEXT: v_writelane_b32 v18, s42, 1
+; GFX11-NEXT: s_lshr_b32 s42, s40, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 2
+; GFX11-NEXT: s_lshr_b32 s42, s29, 24
+; GFX11-NEXT: v_writelane_b32 v18, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s29, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 4
+; GFX11-NEXT: s_lshr_b32 s42, s29, 8
+; GFX11-NEXT: v_writelane_b32 v18, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v18, s42, 6
+; GFX11-NEXT: s_lshr_b32 s42, s28, 8
+; GFX11-NEXT: v_writelane_b32 v18, s42, 7
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[4:5], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB57_3
+; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
@@ -82471,514 +82658,509 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s7, s7, 0
; GFX11-NEXT: s_add_u32 s4, s4, 3
; GFX11-NEXT: s_addc_u32 s5, s5, 0
-; GFX11-NEXT: s_lshr_b32 s42, s25, 8
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[4:5], 24
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s27, 24
+; GFX11-NEXT: s_lshr_b32 s42, s5, 24
+; GFX11-NEXT: s_lshr_b32 s34, s27, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 0
+; GFX11-NEXT: s_lshr_b32 s42, s5, 16
+; GFX11-NEXT: s_lshr_b32 s35, s27, 8
+; GFX11-NEXT: s_lshr_b32 s36, s26, 16
+; GFX11-NEXT: s_lshr_b32 s37, s26, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 1
+; GFX11-NEXT: s_lshr_b32 s42, s5, 8
+; GFX11-NEXT: s_lshr_b32 s38, s25, 24
+; GFX11-NEXT: s_lshr_b32 s39, s25, 16
+; GFX11-NEXT: s_lshr_b32 s48, s25, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 2
+; GFX11-NEXT: s_lshr_b32 s42, s4, 16
+; GFX11-NEXT: s_lshr_b32 s49, s24, 16
+; GFX11-NEXT: s_lshr_b32 s50, s24, 8
+; GFX11-NEXT: s_lshr_b32 s51, s23, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s4, 8
+; GFX11-NEXT: s_lshr_b32 s52, s23, 16
+; GFX11-NEXT: s_lshr_b32 s53, s23, 8
+; GFX11-NEXT: s_lshr_b32 s54, s22, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 4
+; GFX11-NEXT: s_lshr_b32 s42, s7, 24
+; GFX11-NEXT: s_lshr_b32 s55, s22, 8
+; GFX11-NEXT: s_lshr_b32 s64, s21, 24
+; GFX11-NEXT: s_lshr_b32 s65, s21, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s7, 16
+; GFX11-NEXT: s_lshr_b32 s66, s21, 8
+; GFX11-NEXT: s_lshr_b32 s67, s20, 16
+; GFX11-NEXT: s_lshr_b32 s68, s20, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 6
+; GFX11-NEXT: s_lshr_b32 s42, s7, 8
+; GFX11-NEXT: s_lshr_b32 s69, s19, 24
+; GFX11-NEXT: s_lshr_b32 s70, s19, 16
+; GFX11-NEXT: s_lshr_b32 s71, s19, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 7
+; GFX11-NEXT: s_lshr_b32 s42, s6, 16
+; GFX11-NEXT: s_lshr_b32 s80, s18, 16
+; GFX11-NEXT: s_lshr_b32 s81, s18, 8
+; GFX11-NEXT: s_lshr_b32 s82, s17, 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 8
+; GFX11-NEXT: s_lshr_b32 s42, s6, 8
+; GFX11-NEXT: s_lshr_b32 s83, s17, 16
+; GFX11-NEXT: s_lshr_b32 s84, s17, 8
+; GFX11-NEXT: s_lshr_b32 s85, s16, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 9
+; GFX11-NEXT: s_lshr_b32 s42, s9, 24
+; GFX11-NEXT: s_lshr_b32 s86, s16, 8
+; GFX11-NEXT: s_lshr_b32 s87, s3, 24
+; GFX11-NEXT: s_lshr_b32 s96, s3, 16
+; GFX11-NEXT: v_writelane_b32 v19, s42, 10
+; GFX11-NEXT: s_lshr_b32 s42, s9, 16
+; GFX11-NEXT: s_lshr_b32 s97, s3, 8
+; GFX11-NEXT: s_lshr_b32 s98, s2, 16
+; GFX11-NEXT: s_lshr_b32 s99, s2, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 11
+; GFX11-NEXT: s_lshr_b32 s42, s9, 8
+; GFX11-NEXT: s_lshr_b32 s100, s1, 24
+; GFX11-NEXT: s_lshr_b32 s101, s1, 16
+; GFX11-NEXT: s_lshr_b32 s102, s1, 8
+; GFX11-NEXT: v_writelane_b32 v19, s42, 12
+; GFX11-NEXT: s_lshr_b32 s42, s8, 16
+; GFX11-NEXT: s_lshr_b32 s103, s0, 16
+; GFX11-NEXT: s_lshr_b32 s104, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[44:45], s[6:7], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s8, 8
+; GFX11-NEXT: s_lshr_b64 s[46:47], s[8:9], 24
+; GFX11-NEXT: s_lshr_b64 s[56:57], s[10:11], 24
+; GFX11-NEXT: s_lshr_b64 s[58:59], s[12:13], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 14
+; GFX11-NEXT: s_lshr_b32 s42, s11, 24
+; GFX11-NEXT: s_lshr_b64 s[60:61], s[14:15], 24
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
+; GFX11-NEXT: s_lshr_b64 s[72:73], s[28:29], 24
+; GFX11-NEXT: v_writelane_b32 v19, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s11, 16
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[26:27], 24
+; GFX11-NEXT: s_lshr_b64 s[88:89], s[24:25], 24
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[22:23], 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 16
-; GFX11-NEXT: s_lshr_b32 s42, s24, 16
-; GFX11-NEXT: s_lshr_b32 s101, s5, 24
-; GFX11-NEXT: s_lshr_b32 s102, s5, 16
-; GFX11-NEXT: s_lshr_b32 s103, s5, 8
+; GFX11-NEXT: s_lshr_b32 s42, s11, 8
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[20:21], 24
+; GFX11-NEXT: s_lshr_b64 s[90:91], s[18:19], 24
+; GFX11-NEXT: s_lshr_b64 s[92:93], s[16:17], 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 17
-; GFX11-NEXT: s_lshr_b32 s42, s24, 8
-; GFX11-NEXT: s_lshr_b32 s104, s4, 16
-; GFX11-NEXT: s_lshr_b32 s47, s4, 8
-; GFX11-NEXT: s_lshr_b32 s46, s7, 24
+; GFX11-NEXT: s_lshr_b32 s42, s10, 16
+; GFX11-NEXT: s_lshr_b64 s[94:95], s[2:3], 24
+; GFX11-NEXT: s_lshr_b64 s[30:31], s[0:1], 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 18
-; GFX11-NEXT: s_lshr_b32 s42, s23, 24
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s7, 16
-; GFX11-NEXT: s_lshr_b32 s34, s7, 8
-; GFX11-NEXT: s_lshr_b32 s57, s6, 16
+; GFX11-NEXT: s_lshr_b32 s42, s10, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 19
-; GFX11-NEXT: s_lshr_b32 s42, s23, 16
-; GFX11-NEXT: s_lshr_b32 s56, s6, 8
-; GFX11-NEXT: s_lshr_b32 s35, s9, 24
-; GFX11-NEXT: s_lshr_b32 s36, s9, 16
+; GFX11-NEXT: s_lshr_b32 s42, s13, 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 20
-; GFX11-NEXT: s_lshr_b32 s42, s23, 8
-; GFX11-NEXT: s_lshr_b32 s37, s9, 8
-; GFX11-NEXT: s_lshr_b32 s38, s8, 16
-; GFX11-NEXT: s_lshr_b32 s39, s8, 8
+; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 21
-; GFX11-NEXT: s_lshr_b32 s42, s22, 16
-; GFX11-NEXT: s_lshr_b32 s48, s11, 24
-; GFX11-NEXT: s_lshr_b32 s49, s11, 16
-; GFX11-NEXT: s_lshr_b32 s50, s11, 8
+; GFX11-NEXT: s_lshr_b32 s42, s13, 8
; GFX11-NEXT: v_writelane_b32 v19, s42, 22
-; GFX11-NEXT: s_lshr_b32 s42, s22, 8
-; GFX11-NEXT: s_lshr_b32 s51, s10, 16
-; GFX11-NEXT: s_lshr_b32 s52, s10, 8
-; GFX11-NEXT: s_lshr_b32 s53, s13, 24
+; GFX11-NEXT: s_lshr_b32 s42, s12, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 23
-; GFX11-NEXT: s_lshr_b32 s42, s21, 24
-; GFX11-NEXT: s_lshr_b32 s54, s13, 16
-; GFX11-NEXT: s_lshr_b32 s55, s13, 8
-; GFX11-NEXT: s_lshr_b32 s64, s12, 16
+; GFX11-NEXT: s_lshr_b32 s42, s12, 8
; GFX11-NEXT: v_writelane_b32 v19, s42, 24
-; GFX11-NEXT: s_lshr_b32 s42, s21, 16
-; GFX11-NEXT: s_lshr_b32 s65, s12, 8
-; GFX11-NEXT: s_lshr_b32 s66, s15, 24
-; GFX11-NEXT: s_lshr_b32 s67, s15, 16
+; GFX11-NEXT: s_lshr_b32 s42, s15, 24
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 25
-; GFX11-NEXT: s_lshr_b32 s42, s21, 8
-; GFX11-NEXT: s_lshr_b32 s68, s15, 8
-; GFX11-NEXT: s_lshr_b32 s59, s14, 16
-; GFX11-NEXT: s_lshr_b32 s58, s14, 8
+; GFX11-NEXT: s_lshr_b32 s42, s15, 16
; GFX11-NEXT: v_writelane_b32 v19, s42, 26
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s70, s41, 24
-; GFX11-NEXT: s_lshr_b32 s71, s41, 16
-; GFX11-NEXT: s_lshr_b32 s60, s41, 8
+; GFX11-NEXT: s_lshr_b32 s42, s15, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 27
-; GFX11-NEXT: s_lshr_b32 s42, s20, 8
-; GFX11-NEXT: s_lshr_b32 s80, s40, 16
-; GFX11-NEXT: s_lshr_b32 s61, s40, 8
-; GFX11-NEXT: s_lshr_b32 s81, s29, 24
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: v_writelane_b32 v19, s42, 28
-; GFX11-NEXT: s_lshr_b32 s42, s19, 24
-; GFX11-NEXT: s_lshr_b32 s82, s29, 16
-; GFX11-NEXT: s_lshr_b32 s83, s29, 8
-; GFX11-NEXT: s_lshr_b32 s84, s28, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 29
-; GFX11-NEXT: s_lshr_b32 s42, s19, 16
-; GFX11-NEXT: s_lshr_b32 s85, s28, 8
-; GFX11-NEXT: s_lshr_b32 s86, s27, 24
-; GFX11-NEXT: s_lshr_b32 s72, s27, 16
+; GFX11-NEXT: s_lshr_b32 s42, s41, 24
; GFX11-NEXT: v_writelane_b32 v19, s42, 30
-; GFX11-NEXT: s_lshr_b32 s42, s19, 8
-; GFX11-NEXT: s_lshr_b32 s87, s27, 8
-; GFX11-NEXT: s_lshr_b32 s73, s26, 16
-; GFX11-NEXT: s_lshr_b32 s96, s26, 8
+; GFX11-NEXT: s_lshr_b32 s42, s41, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v19, s42, 31
-; GFX11-NEXT: s_lshr_b32 s42, s18, 16
-; GFX11-NEXT: s_lshr_b32 s97, s25, 24
+; GFX11-NEXT: s_lshr_b32 s42, s41, 8
; GFX11-NEXT: v_writelane_b32 v18, s42, 0
-; GFX11-NEXT: s_lshr_b32 s42, s18, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 14
-; GFX11-NEXT: s_lshr_b32 s69, s25, 16
-; GFX11-NEXT: s_lshr_b32 s74, s2, 16
+; GFX11-NEXT: s_lshr_b32 s42, s40, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v18, s42, 1
-; GFX11-NEXT: s_lshr_b32 s42, s17, 24
-; GFX11-NEXT: v_writelane_b32 v19, s63, 15
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[6:7], 24
-; GFX11-NEXT: s_lshr_b32 s43, s2, 8
+; GFX11-NEXT: s_lshr_b32 s42, s40, 8
; GFX11-NEXT: v_writelane_b32 v18, s42, 2
-; GFX11-NEXT: s_lshr_b32 s42, s17, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 12
-; GFX11-NEXT: s_lshr_b32 s98, s1, 24
-; GFX11-NEXT: s_lshr_b32 s99, s1, 16
+; GFX11-NEXT: s_lshr_b32 s42, s29, 24
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v18, s42, 3
-; GFX11-NEXT: s_lshr_b32 s42, s17, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 13
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[8:9], 24
-; GFX11-NEXT: s_lshr_b32 s100, s1, 8
+; GFX11-NEXT: s_lshr_b32 s42, s29, 16
; GFX11-NEXT: v_writelane_b32 v18, s42, 4
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: v_writelane_b32 v19, s62, 10
-; GFX11-NEXT: s_lshr_b32 s44, s0, 16
-; GFX11-NEXT: s_lshr_b32 s45, s0, 8
+; GFX11-NEXT: s_lshr_b32 s42, s29, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: v_writelane_b32 v18, s42, 5
-; GFX11-NEXT: s_lshr_b32 s42, s16, 8
-; GFX11-NEXT: v_writelane_b32 v19, s63, 11
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[10:11], 24
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[26:27], 24
+; GFX11-NEXT: s_lshr_b32 s42, s28, 16
; GFX11-NEXT: v_writelane_b32 v18, s42, 6
-; GFX11-NEXT: s_lshr_b32 s42, s3, 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 8
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[24:25], 24
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[20:21], 24
-; GFX11-NEXT: v_writelane_b32 v18, s42, 7
-; GFX11-NEXT: s_lshr_b32 s42, s3, 16
-; GFX11-NEXT: v_writelane_b32 v19, s63, 9
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[12:13], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[18:19], 24
-; GFX11-NEXT: v_writelane_b32 v18, s42, 8
-; GFX11-NEXT: s_lshr_b32 s42, s3, 8
-; GFX11-NEXT: v_writelane_b32 v19, s62, 6
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[0:1], 24
-; GFX11-NEXT: v_writelane_b32 v18, s42, 9
-; GFX11-NEXT: v_writelane_b32 v19, s63, 7
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[14:15], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v19, s62, 4
-; GFX11-NEXT: v_writelane_b32 v19, s63, 5
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
-; GFX11-NEXT: v_writelane_b32 v19, s62, 2
-; GFX11-NEXT: v_writelane_b32 v19, s63, 3
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[28:29], 24
+; GFX11-NEXT: s_lshr_b32 s42, s28, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v19, s62, 0
-; GFX11-NEXT: v_writelane_b32 v19, s63, 1
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[22:23], 24
-; GFX11-NEXT: .LBB57_5: ; %end
-; GFX11-NEXT: s_lshl_b32 s43, s43, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_and_b32 s42, s74, 0xff
-; GFX11-NEXT: s_or_b32 s2, s2, s43
-; GFX11-NEXT: s_lshl_b32 s43, s94, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
-; GFX11-NEXT: s_or_b32 s42, s42, s43
-; GFX11-NEXT: s_lshl_b32 s45, s45, 8
-; GFX11-NEXT: s_lshl_b32 s42, s42, 16
+; GFX11-NEXT: v_writelane_b32 v18, s42, 7
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[4:5], 24
+; GFX11-NEXT: .LBB57_3: ; %end
+; GFX11-NEXT: s_lshl_b32 s43, s104, 8
; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_or_b32 s2, s2, s42
-; GFX11-NEXT: v_readlane_b32 s42, v18, 9
-; GFX11-NEXT: s_or_b32 s0, s0, s45
-; GFX11-NEXT: s_lshl_b32 s45, s30, 8
-; GFX11-NEXT: s_and_b32 s44, s44, 0xff
-; GFX11-NEXT: s_and_b32 s3, s3, 0xff
-; GFX11-NEXT: s_or_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s42, s42, 8
+; GFX11-NEXT: s_and_b32 s45, s103, 0xff
+; GFX11-NEXT: s_or_b32 s0, s0, s43
+; GFX11-NEXT: s_lshl_b32 s43, s30, 8
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s44, s44, 16
-; GFX11-NEXT: s_or_b32 s3, s3, s42
-; GFX11-NEXT: v_readlane_b32 s42, v18, 8
-; GFX11-NEXT: v_readlane_b32 s43, v18, 7
-; GFX11-NEXT: s_or_b32 s0, s0, s44
+; GFX11-NEXT: s_or_b32 s43, s45, s43
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
-; GFX11-NEXT: s_lshl_b32 s44, s100, 8
-; GFX11-NEXT: s_lshl_b32 s45, s98, 8
-; GFX11-NEXT: s_or_b32 s1, s1, s44
-; GFX11-NEXT: s_and_b32 s44, s99, 0xff
-; GFX11-NEXT: s_and_b32 s42, s42, 0xff
-; GFX11-NEXT: s_or_b32 s44, s44, s45
-; GFX11-NEXT: s_lshl_b32 s43, s43, 8
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
+; GFX11-NEXT: s_lshl_b32 s45, s100, 8
+; GFX11-NEXT: s_or_b32 s0, s0, s43
+; GFX11-NEXT: s_lshl_b32 s43, s102, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-NEXT: s_or_b32 s1, s1, s43
+; GFX11-NEXT: s_and_b32 s43, s101, 0xff
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: s_lshl_b32 s44, s44, 16
-; GFX11-NEXT: s_or_b32 s42, s42, s43
-; GFX11-NEXT: s_or_b32 s1, s1, s44
-; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
-; GFX11-NEXT: s_lshl_b32 s42, s42, 16
+; GFX11-NEXT: s_or_b32 s43, s43, s45
+; GFX11-NEXT: s_and_b32 s45, s98, 0xff
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
+; GFX11-NEXT: s_and_b32 s3, s3, 0xff
+; GFX11-NEXT: s_or_b32 s1, s1, s43
+; GFX11-NEXT: s_lshl_b32 s43, s99, 8
; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
-; GFX11-NEXT: v_readlane_b32 s0, v18, 6
-; GFX11-NEXT: s_or_b32 s3, s3, s42
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
-; GFX11-NEXT: v_readlane_b32 s2, v18, 5
-; GFX11-NEXT: s_lshl_b32 s0, s0, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s43
+; GFX11-NEXT: s_lshl_b32 s43, s94, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-NEXT: s_or_b32 s43, s45, s43
+; GFX11-NEXT: s_lshl_b32 s45, s87, 8
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
+; GFX11-NEXT: s_lshl_b32 s0, s86, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s43
+; GFX11-NEXT: s_lshl_b32 s43, s97, 8
; GFX11-NEXT: s_and_b32 s1, s16, 0xff
-; GFX11-NEXT: v_readlane_b32 s3, v18, 2
+; GFX11-NEXT: s_or_b32 s3, s3, s43
+; GFX11-NEXT: s_and_b32 s43, s96, 0xff
+; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
+; GFX11-NEXT: s_or_b32 s43, s43, s45
; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_lshl_b32 s43, s43, 16
; GFX11-NEXT: s_lshl_b32 s1, s92, 8
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
+; GFX11-NEXT: s_or_b32 s3, s3, s43
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-NEXT: s_and_b32 s2, s85, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_or_b32 s1, s2, s1
-; GFX11-NEXT: v_readlane_b32 s2, v18, 4
+; GFX11-NEXT: s_lshl_b32 s2, s84, 8
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s3, s82, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_and_b32 s1, s17, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: v_readlane_b32 s16, v18, 0
+; GFX11-NEXT: s_and_b32 s16, s80, 0xff
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v18, 3
+; GFX11-NEXT: s_and_b32 s2, s83, 0xff
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: v_readlane_b32 s17, v19, 29
-; GFX11-NEXT: s_and_b32 s16, s16, 0xff
-; GFX11-NEXT: v_readlane_b32 s100, v17, 4
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: v_readlane_b32 s99, v17, 3
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_and_b32 s3, s18, 0xff
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
-; GFX11-NEXT: s_lshl_b32 s17, s17, 8
+; GFX11-NEXT: s_lshl_b32 s17, s69, 8
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v18, 1
+; GFX11-NEXT: s_lshl_b32 s2, s81, 8
; GFX11-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v6, s1
-; GFX11-NEXT: v_readlane_b32 s0, v19, 28
-; GFX11-NEXT: s_and_b32 s1, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: v_readlane_b32 s18, v19, 19
; GFX11-NEXT: s_or_b32 s2, s3, s2
; GFX11-NEXT: s_lshl_b32 s3, s90, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_or_b32 s3, s16, s3
-; GFX11-NEXT: v_readlane_b32 s16, v19, 31
+; GFX11-NEXT: s_lshl_b32 s16, s71, 8
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_lshl_b32 s0, s0, 8
+; GFX11-NEXT: s_lshl_b32 s0, s68, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_and_b32 s3, s19, 0xff
-; GFX11-NEXT: s_lshl_b32 s16, s16, 8
-; GFX11-NEXT: s_or_b32 s0, s1, s0
+; GFX11-NEXT: s_and_b32 s1, s20, 0xff
; GFX11-NEXT: s_or_b32 s3, s3, s16
-; GFX11-NEXT: v_readlane_b32 s16, v19, 30
+; GFX11-NEXT: s_and_b32 s16, s70, 0xff
; GFX11-NEXT: s_and_b32 s3, s3, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s78, 8
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
-; GFX11-NEXT: s_lshl_b32 s18, s18, 8
-; GFX11-NEXT: s_and_b32 s16, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s19, s86, 8
; GFX11-NEXT: s_or_b32 s16, s16, s17
-; GFX11-NEXT: v_readlane_b32 s17, v19, 21
+; GFX11-NEXT: s_or_b32 s0, s1, s0
; GFX11-NEXT: s_lshl_b32 s16, s16, 16
-; GFX11-NEXT: v_readlane_b32 s98, v17, 2
+; GFX11-NEXT: s_lshl_b32 s1, s76, 8
; GFX11-NEXT: s_or_b32 s3, s3, s16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
-; GFX11-NEXT: v_readlane_b32 s2, v19, 27
-; GFX11-NEXT: v_readlane_b32 s3, v19, 24
-; GFX11-NEXT: v_readlane_b32 s16, v19, 22
-; GFX11-NEXT: s_lshl_b32 s17, s17, 8
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off
-; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s2, s67, 0xff
+; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_or_b32 s1, s2, s1
-; GFX11-NEXT: v_readlane_b32 s2, v19, 26
+; GFX11-NEXT: s_lshl_b32 s2, s66, 8
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_and_b32 s16, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s3, s64, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_and_b32 s1, s21, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: v_readlane_b32 s86, v16, 30
+; GFX11-NEXT: s_and_b32 s16, s54, 0xff
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v19, 25
+; GFX11-NEXT: s_and_b32 s2, s65, 0xff
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
-; GFX11-NEXT: v_readlane_b32 s31, v16, 1
-; GFX11-NEXT: v_readlane_b32 s30, v16, 0
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_and_b32 s3, s22, 0xff
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
+; GFX11-NEXT: s_lshl_b32 s17, s53, 8
; GFX11-NEXT: s_or_b32 s1, s1, s2
-; GFX11-NEXT: v_readlane_b32 s2, v19, 23
-; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
-; GFX11-NEXT: v_readlane_b32 s1, v19, 18
-; GFX11-NEXT: s_and_b32 s0, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s2, s2, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_lshl_b32 s2, s55, 8
+; GFX11-NEXT: s_lshl_b32 s18, s51, 8
; GFX11-NEXT: s_or_b32 s2, s3, s2
-; GFX11-NEXT: s_lshl_b32 s3, s62, 8
+; GFX11-NEXT: s_lshl_b32 s3, s74, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_or_b32 s3, s16, s3
; GFX11-NEXT: s_and_b32 s16, s23, 0xff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s16, s16, s17
-; GFX11-NEXT: v_readlane_b32 s17, v19, 20
+; GFX11-NEXT: s_and_b32 s17, s52, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_and_b32 s3, s16, 0xffff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 8
-; GFX11-NEXT: s_and_b32 s17, s17, 0xff
-; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s17, s17, s18
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-NEXT: s_and_b32 s3, s16, 0xffff
; GFX11-NEXT: s_lshl_b32 s16, s17, 16
-; GFX11-NEXT: s_lshl_b32 s17, s97, 8
+; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
; GFX11-NEXT: s_or_b32 s3, s3, s16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v11, s2 :: v_dual_mov_b32 v12, s3
-; GFX11-NEXT: v_readlane_b32 s2, v19, 17
+; GFX11-NEXT: s_and_b32 s0, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s1, s50, 8
+; GFX11-NEXT: s_and_b32 s2, s49, 0xff
; GFX11-NEXT: s_lshl_b32 s3, s88, 8
-; GFX11-NEXT: s_and_b32 s16, s69, 0xff
-; GFX11-NEXT: s_and_b32 s18, s72, 0xff
-; GFX11-NEXT: v_readlane_b32 s97, v17, 1
-; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: v_readlane_b32 s69, v16, 21
+; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
-; GFX11-NEXT: v_readlane_b32 s3, v19, 16
; GFX11-NEXT: s_and_b32 s2, s25, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s0, s0, s1
-; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s3, s48, 8
+; GFX11-NEXT: s_and_b32 s16, s39, 0xff
+; GFX11-NEXT: s_lshl_b32 s17, s38, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
+; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
-; GFX11-NEXT: s_and_b32 s16, s73, 0xff
+; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s96, 8
-; GFX11-NEXT: s_lshl_b32 s17, s76, 8
+; GFX11-NEXT: s_lshl_b32 s3, s37, 8
+; GFX11-NEXT: s_and_b32 s16, s36, 0xff
+; GFX11-NEXT: s_lshl_b32 s17, s78, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
; GFX11-NEXT: s_and_b32 s16, s27, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s87, 8
+; GFX11-NEXT: s_lshl_b32 s17, s35, 8
+; GFX11-NEXT: s_and_b32 s18, s34, 0xff
+; GFX11-NEXT: s_lshl_b32 s19, vcc_hi, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s16, s16, s17
; GFX11-NEXT: s_or_b32 s17, s18, s19
-; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_and_b32 s16, s16, 0xffff
; GFX11-NEXT: s_lshl_b32 s17, s17, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off
+; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:16
; GFX11-NEXT: s_or_b32 s3, s16, s17
-; GFX11-NEXT: v_readlane_b32 s16, v19, 0
; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-NEXT: v_readlane_b32 s1, v18, 7
+; GFX11-NEXT: v_readlane_b32 s2, v18, 6
; GFX11-NEXT: s_and_b32 s0, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s85, 8
-; GFX11-NEXT: s_and_b32 s2, s84, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s16, 8
-; GFX11-NEXT: v_readlane_b32 s17, v19, 1
+; GFX11-NEXT: s_lshl_b32 s3, s72, 8
+; GFX11-NEXT: v_readlane_b32 s16, v18, 4
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
+; GFX11-NEXT: v_readlane_b32 s3, v18, 5
+; GFX11-NEXT: v_readlane_b32 s17, v18, 3
; GFX11-NEXT: s_and_b32 s2, s29, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s83, 8
-; GFX11-NEXT: s_and_b32 s16, s82, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s81, 8
-; GFX11-NEXT: v_readlane_b32 s18, v19, 2
+; GFX11-NEXT: s_and_b32 s16, s16, 0xff
+; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s17, s17, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
-; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
+; GFX11-NEXT: v_readlane_b32 s3, v18, 2
+; GFX11-NEXT: v_readlane_b32 s16, v18, 1
; GFX11-NEXT: s_and_b32 s2, s40, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s61, 8
-; GFX11-NEXT: s_and_b32 s16, s80, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s18, 8
-; GFX11-NEXT: v_readlane_b32 s19, v19, 3
+; GFX11-NEXT: s_lshl_b32 s17, s62, 8
+; GFX11-NEXT: v_readlane_b32 s18, v19, 31
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s16, s16, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
+; GFX11-NEXT: v_readlane_b32 s17, v18, 0
+; GFX11-NEXT: v_readlane_b32 s19, v19, 30
; GFX11-NEXT: s_and_b32 s16, s41, 0xff
-; GFX11-NEXT: s_lshl_b32 s17, s60, 8
-; GFX11-NEXT: s_and_b32 s18, s71, 0xff
-; GFX11-NEXT: s_lshl_b32 s19, s70, 8
-; GFX11-NEXT: s_or_b32 s16, s16, s17
-; GFX11-NEXT: s_or_b32 s17, s18, s19
+; GFX11-NEXT: s_and_b32 s18, s18, 0xff
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
+; GFX11-NEXT: s_lshl_b32 s17, s17, 8
+; GFX11-NEXT: s_lshl_b32 s19, s19, 8
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_or_b32 s16, s16, s17
+; GFX11-NEXT: s_or_b32 s17, s18, s19
; GFX11-NEXT: s_and_b32 s16, s16, 0xffff
; GFX11-NEXT: s_lshl_b32 s17, s17, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s16, s17
-; GFX11-NEXT: v_readlane_b32 s16, v19, 4
; GFX11-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v6, s1
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
+; GFX11-NEXT: v_readlane_b32 s1, v19, 29
+; GFX11-NEXT: v_readlane_b32 s2, v19, 28
; GFX11-NEXT: s_and_b32 s0, s14, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s58, 8
-; GFX11-NEXT: s_and_b32 s2, s59, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s16, 8
+; GFX11-NEXT: s_lshl_b32 s3, s60, 8
+; GFX11-NEXT: v_readlane_b32 s14, v19, 26
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s15, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s68, 8
-; GFX11-NEXT: s_and_b32 s14, s67, 0xff
-; GFX11-NEXT: s_lshl_b32 s15, s66, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s14, s15
-; GFX11-NEXT: v_readlane_b32 s14, v19, 6
+; GFX11-NEXT: v_readlane_b32 s3, v19, 27
+; GFX11-NEXT: v_readlane_b32 s15, v19, 25
+; GFX11-NEXT: s_and_b32 s14, s14, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s15, s15, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_or_b32 s3, s14, s15
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s12, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s65, 8
-; GFX11-NEXT: s_and_b32 s12, s64, 0xff
-; GFX11-NEXT: s_lshl_b32 s14, s14, 8
-; GFX11-NEXT: v_readlane_b32 s15, v19, 7
+; GFX11-NEXT: v_readlane_b32 s3, v19, 24
+; GFX11-NEXT: v_readlane_b32 s12, v19, 23
+; GFX11-NEXT: s_lshl_b32 s14, s58, 8
+; GFX11-NEXT: v_readlane_b32 s15, v19, 20
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:32
+; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s12, s12, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s12, s14
; GFX11-NEXT: s_and_b32 s12, s13, 0xff
-; GFX11-NEXT: s_lshl_b32 s13, s55, 8
-; GFX11-NEXT: s_and_b32 s14, s54, 0xff
-; GFX11-NEXT: s_lshl_b32 s15, s53, 8
-; GFX11-NEXT: s_or_b32 s12, s12, s13
-; GFX11-NEXT: s_or_b32 s13, s14, s15
+; GFX11-NEXT: v_readlane_b32 s13, v19, 22
+; GFX11-NEXT: v_readlane_b32 s14, v19, 21
+; GFX11-NEXT: s_lshl_b32 s15, s15, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_lshl_b32 s13, s13, 8
+; GFX11-NEXT: s_and_b32 s14, s14, 0xff
+; GFX11-NEXT: s_or_b32 s12, s12, s13
+; GFX11-NEXT: s_or_b32 s13, s14, s15
; GFX11-NEXT: s_and_b32 s12, s12, 0xffff
; GFX11-NEXT: s_lshl_b32 s13, s13, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s12, s13
-; GFX11-NEXT: v_readlane_b32 s12, v19, 8
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:32
-; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
; GFX11-NEXT: v_dual_mov_b32 v9, s0 :: v_dual_mov_b32 v10, s1
; GFX11-NEXT: v_dual_mov_b32 v11, s2 :: v_dual_mov_b32 v12, s3
+; GFX11-NEXT: v_readlane_b32 s1, v19, 19
+; GFX11-NEXT: v_readlane_b32 s2, v19, 18
; GFX11-NEXT: s_and_b32 s0, s10, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s52, 8
-; GFX11-NEXT: s_and_b32 s2, s51, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s12, 8
+; GFX11-NEXT: s_lshl_b32 s3, s56, 8
+; GFX11-NEXT: v_readlane_b32 s10, v19, 16
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s11, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s50, 8
-; GFX11-NEXT: s_and_b32 s10, s49, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s48, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s10, s11
-; GFX11-NEXT: v_readlane_b32 s10, v19, 10
+; GFX11-NEXT: v_readlane_b32 s3, v19, 17
+; GFX11-NEXT: v_readlane_b32 s11, v19, 15
+; GFX11-NEXT: s_and_b32 s10, s10, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s11, s11, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_or_b32 s3, s10, s11
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s8, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s39, 8
-; GFX11-NEXT: s_and_b32 s8, s38, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 8
-; GFX11-NEXT: v_readlane_b32 s11, v19, 11
+; GFX11-NEXT: v_readlane_b32 s3, v19, 14
+; GFX11-NEXT: v_readlane_b32 s8, v19, 13
+; GFX11-NEXT: s_lshl_b32 s10, s46, 8
+; GFX11-NEXT: v_readlane_b32 s11, v19, 10
+; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s8, s8, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s8, s10
; GFX11-NEXT: s_and_b32 s8, s9, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s37, 8
-; GFX11-NEXT: s_and_b32 s10, s36, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s35, 8
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: v_readlane_b32 s9, v19, 12
+; GFX11-NEXT: v_readlane_b32 s10, v19, 11
+; GFX11-NEXT: s_lshl_b32 s11, s11, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_lshl_b32 s9, s9, 8
+; GFX11-NEXT: s_and_b32 s10, s10, 0xff
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s8, s9
-; GFX11-NEXT: v_readlane_b32 s8, v19, 12
-; GFX11-NEXT: v_dual_mov_b32 v1, s0 :: v_dual_mov_b32 v2, s1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s3
+; GFX11-NEXT: v_readlane_b32 s1, v19, 9
+; GFX11-NEXT: v_readlane_b32 s2, v19, 8
; GFX11-NEXT: s_and_b32 s0, s6, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s56, 8
-; GFX11-NEXT: s_and_b32 s2, s57, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s8, 8
+; GFX11-NEXT: s_lshl_b32 s3, s44, 8
+; GFX11-NEXT: v_readlane_b32 s6, v19, 6
+; GFX11-NEXT: s_lshl_b32 s1, s1, 8
+; GFX11-NEXT: s_and_b32 s2, s2, 0xff
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s7, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s34, 8
-; GFX11-NEXT: s_and_b32 s6, vcc_hi, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s46, 8
-; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_or_b32 s3, s6, s7
-; GFX11-NEXT: v_readlane_b32 s6, v19, 14
+; GFX11-NEXT: v_readlane_b32 s3, v19, 7
+; GFX11-NEXT: v_readlane_b32 s7, v19, 5
+; GFX11-NEXT: s_and_b32 s6, s6, 0xff
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_lshl_b32 s7, s7, 8
+; GFX11-NEXT: s_or_b32 s2, s2, s3
+; GFX11-NEXT: s_or_b32 s3, s6, s7
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s4, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s47, 8
-; GFX11-NEXT: s_and_b32 s4, s104, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 8
-; GFX11-NEXT: v_readlane_b32 s7, v19, 15
+; GFX11-NEXT: v_readlane_b32 s3, v19, 4
+; GFX11-NEXT: v_readlane_b32 s4, v19, 3
+; GFX11-NEXT: s_lshl_b32 s6, s42, 8
+; GFX11-NEXT: v_readlane_b32 s7, v19, 0
+; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:64
+; GFX11-NEXT: s_lshl_b32 s3, s3, 8
+; GFX11-NEXT: s_and_b32 s4, s4, 0xff
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s6
; GFX11-NEXT: s_and_b32 s4, s5, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s103, 8
-; GFX11-NEXT: s_and_b32 s6, s102, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s101, 8
-; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: v_readlane_b32 s5, v19, 2
+; GFX11-NEXT: v_readlane_b32 s6, v19, 1
+; GFX11-NEXT: s_lshl_b32 s7, s7, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
+; GFX11-NEXT: s_lshl_b32 s5, s5, 8
+; GFX11-NEXT: s_and_b32 s6, s6, 0xff
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: scratch_store_b128 v0, v[5:8], off offset:64
; GFX11-NEXT: v_dual_mov_b32 v5, s0 :: v_dual_mov_b32 v6, s1
; GFX11-NEXT: v_dual_mov_b32 v7, s2 :: v_dual_mov_b32 v8, s3
-; GFX11-NEXT: v_readlane_b32 s17, v19, 5
-; GFX11-NEXT: v_readlane_b32 s13, v19, 9
-; GFX11-NEXT: v_readlane_b32 s9, v19, 13
; GFX11-NEXT: s_clause 0x2
; GFX11-NEXT: scratch_store_b128 v0, v[9:12], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:96
@@ -82987,8 +83169,13 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s103, v17, 7
; GFX11-NEXT: v_readlane_b32 s102, v17, 6
; GFX11-NEXT: v_readlane_b32 s101, v17, 5
+; GFX11-NEXT: v_readlane_b32 s100, v17, 4
+; GFX11-NEXT: v_readlane_b32 s99, v17, 3
+; GFX11-NEXT: v_readlane_b32 s98, v17, 2
+; GFX11-NEXT: v_readlane_b32 s97, v17, 1
; GFX11-NEXT: v_readlane_b32 s96, v17, 0
; GFX11-NEXT: v_readlane_b32 s87, v16, 31
+; GFX11-NEXT: v_readlane_b32 s86, v16, 30
; GFX11-NEXT: v_readlane_b32 s85, v16, 29
; GFX11-NEXT: v_readlane_b32 s84, v16, 28
; GFX11-NEXT: v_readlane_b32 s83, v16, 27
@@ -82997,6 +83184,7 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s80, v16, 24
; GFX11-NEXT: v_readlane_b32 s71, v16, 23
; GFX11-NEXT: v_readlane_b32 s70, v16, 22
+; GFX11-NEXT: v_readlane_b32 s69, v16, 21
; GFX11-NEXT: v_readlane_b32 s68, v16, 20
; GFX11-NEXT: v_readlane_b32 s67, v16, 19
; GFX11-NEXT: v_readlane_b32 s66, v16, 18
@@ -83016,6 +83204,8 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_readlane_b32 s36, v16, 4
; GFX11-NEXT: v_readlane_b32 s35, v16, 3
; GFX11-NEXT: v_readlane_b32 s34, v16, 2
+; GFX11-NEXT: v_readlane_b32 s31, v16, 1
+; GFX11-NEXT: v_readlane_b32 s30, v16, 0
; GFX11-NEXT: s_xor_saveexec_b32 s0, -1
; GFX11-NEXT: s_clause 0x3
; GFX11-NEXT: scratch_load_b32 v16, off, s32
@@ -83025,6 +83215,146 @@ define inreg <128 x i8> @bitcast_v16i64_to_v128i8_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: s_mov_b32 exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
+; GFX11-NEXT: .LBB57_4:
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr104
+; GFX11-NEXT: ; implicit-def: $sgpr103
+; GFX11-NEXT: ; implicit-def: $sgpr30
+; GFX11-NEXT: ; implicit-def: $sgpr102
+; GFX11-NEXT: ; implicit-def: $sgpr101
+; GFX11-NEXT: ; implicit-def: $sgpr100
+; GFX11-NEXT: ; implicit-def: $sgpr99
+; GFX11-NEXT: ; implicit-def: $sgpr98
+; GFX11-NEXT: ; implicit-def: $sgpr94
+; GFX11-NEXT: ; implicit-def: $sgpr97
+; GFX11-NEXT: ; implicit-def: $sgpr96
+; GFX11-NEXT: ; implicit-def: $sgpr87
+; GFX11-NEXT: ; implicit-def: $sgpr86
+; GFX11-NEXT: ; implicit-def: $sgpr85
+; GFX11-NEXT: ; implicit-def: $sgpr92
+; GFX11-NEXT: ; implicit-def: $sgpr84
+; GFX11-NEXT: ; implicit-def: $sgpr83
+; GFX11-NEXT: ; implicit-def: $sgpr82
+; GFX11-NEXT: ; implicit-def: $sgpr81
+; GFX11-NEXT: ; implicit-def: $sgpr80
+; GFX11-NEXT: ; implicit-def: $sgpr90
+; GFX11-NEXT: ; implicit-def: $sgpr71
+; GFX11-NEXT: ; implicit-def: $sgpr70
+; GFX11-NEXT: ; implicit-def: $sgpr69
+; GFX11-NEXT: ; implicit-def: $sgpr68
+; GFX11-NEXT: ; implicit-def: $sgpr67
+; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr66
+; GFX11-NEXT: ; implicit-def: $sgpr65
+; GFX11-NEXT: ; implicit-def: $sgpr64
+; GFX11-NEXT: ; implicit-def: $sgpr55
+; GFX11-NEXT: ; implicit-def: $sgpr54
+; GFX11-NEXT: ; implicit-def: $sgpr74
+; GFX11-NEXT: ; implicit-def: $sgpr53
+; GFX11-NEXT: ; implicit-def: $sgpr52
+; GFX11-NEXT: ; implicit-def: $sgpr51
+; GFX11-NEXT: ; implicit-def: $sgpr50
+; GFX11-NEXT: ; implicit-def: $sgpr49
+; GFX11-NEXT: ; implicit-def: $sgpr48
+; GFX11-NEXT: ; implicit-def: $sgpr39
+; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr37
+; GFX11-NEXT: ; implicit-def: $sgpr36
+; GFX11-NEXT: ; implicit-def: $sgpr35
+; GFX11-NEXT: ; implicit-def: $sgpr34
+; GFX11-NEXT: ; implicit-def: $vcc_hi
+; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr78
+; GFX11-NEXT: ; implicit-def: $sgpr72
+; GFX11-NEXT: ; implicit-def: $sgpr62
+; GFX11-NEXT: ; implicit-def: $sgpr60
+; GFX11-NEXT: ; implicit-def: $sgpr58
+; GFX11-NEXT: ; implicit-def: $sgpr56
+; GFX11-NEXT: ; implicit-def: $sgpr46
+; GFX11-NEXT: ; implicit-def: $sgpr44
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
+; GFX11-NEXT: s_cbranch_vccz .LBB57_2
+; GFX11-NEXT: s_branch .LBB57_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -89093,8 +89423,17 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32
@@ -89103,15 +89442,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:72
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:80
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:88
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
@@ -89121,113 +89460,92 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3
-; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5
-; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v3
+; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5
+; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v7
+; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v9
+; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v11
+; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v13
+; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v15
+; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v17
+; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v19
+; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v21
+; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v23
+; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v25
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v27
+; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v53
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v49
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192
@@ -89237,31 +89555,31 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256
@@ -89273,140 +89591,157 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:324
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:308
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9
-; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:124
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:300
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:292
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v3
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:284
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:276
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:268
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:260
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:252
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:244
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:236
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:228
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:220
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:204
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:196
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:188
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:164
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:156
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:148
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:140
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:124
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:116
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v2
-; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
-; SI-NEXT: v_or_b32_e32 v0, v0, v60
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
+; SI-NEXT: v_or_b32_e32 v0, v0, v16
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v30, v1
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_mov_b32_e32 v30, v5
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v4, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
+; SI-NEXT: v_or_b32_e32 v0, v0, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
+; SI-NEXT: v_or_b32_e32 v2, v2, v28
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_or_b32_e32 v3, v20, v3
+; SI-NEXT: v_or_b32_e32 v5, v2, v3
+; SI-NEXT: v_mov_b32_e32 v2, v9
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: s_or_b32 s4, s4, s5
@@ -89415,306 +89750,310 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_lshl_b32 s8, s27, 24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: v_or_b32_e32 v4, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT: v_or_b32_e32 v5, v2, v3
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
-; SI-NEXT: v_mov_b32_e32 v3, v7
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v12
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v57, v1
; SI-NEXT: v_or_b32_e32 v6, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v14
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v16
-; SI-NEXT: v_or_b32_e32 v0, v0, v15
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v24
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v7, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v20
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v26
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v17, v1
; SI-NEXT: v_or_b32_e32 v8, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v22
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v24
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v19
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_mov_b32_e32 v2, v9
+; SI-NEXT: v_or_b32_e32 v1, v15, v1
; SI-NEXT: v_or_b32_e32 v9, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v26
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v28
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v10, v1
+; SI-NEXT: v_or_b32_e32 v1, v27, v1
; SI-NEXT: v_or_b32_e32 v10, v0, v1
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v11
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v39
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v23
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v12, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v12, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v23
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v13
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v13, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v13, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v14, v1
-; SI-NEXT: v_or_b32_e32 v14, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v27
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v14, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v62
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v48
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v15, v1
-; SI-NEXT: v_or_b32_e32 v15, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
-; SI-NEXT: v_mov_b32_e32 v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v15, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v42, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v21
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v16, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v16, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v46, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v17, v1
-; SI-NEXT: v_or_b32_e32 v17, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v51
-; SI-NEXT: v_mov_b32_e32 v55, v22
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v51, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v17, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v22
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v18, v1
-; SI-NEXT: v_or_b32_e32 v18, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v50
-; SI-NEXT: v_mov_b32_e32 v44, v23
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v50, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v18, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v53, v3
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v29
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v63
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v19, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v19, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v40, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v30
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v20, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v20, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v34, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v51
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v21, v1
-; SI-NEXT: v_or_b32_e32 v21, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v59
-; SI-NEXT: v_mov_b32_e32 v59, v24
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v21, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v33
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v39
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v22, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v22, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v39, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v59
+; SI-NEXT: v_or_b32_e32 v0, v0, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v23, v1
+; SI-NEXT: v_or_b32_e32 v1, v39, v1
; SI-NEXT: v_or_b32_e32 v23, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v37, v56
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v33, v3
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v24, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v24, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v25, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v25, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v45
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v35, v39
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v63, v1
-; SI-NEXT: v_or_b32_e32 v26, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v26, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v63
+; SI-NEXT: v_mov_b32_e32 v41, v62
+; SI-NEXT: v_mov_b32_e32 v63, v56
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v46
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_or_b32_e32 v27, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v38
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v27, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v28, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v62, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v38, v3
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v61
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v29, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
-; SI-NEXT: v_or_b32_e32 v0, v0, v30
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v61, v54
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v30, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
-; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v57, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v31, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v40
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: v_mov_b32_e32 v38, v63
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -89741,61 +90080,64 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_and_b32 s6, s6, 0xffff
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: v_mov_b32_e32 v57, v1
+; SI-NEXT: v_mov_b32_e32 v48, v1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB59_3
; SI-NEXT: .LBB59_2:
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: v_mov_b32_e32 v37, v56
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_mov_b32_e32 v45, v33
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; SI-NEXT: .LBB59_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v63, v46
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v47, v44
; SI-NEXT: s_cbranch_vccnz .LBB59_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s28, s28, 3
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v0, s4, v0
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -89842,7 +90184,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89851,17 +90193,17 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89871,15 +90213,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89889,15 +90231,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89907,15 +90249,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89925,15 +90267,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89943,15 +90285,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89961,15 +90303,15 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -89978,34 +90320,66 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v54, v1
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -90014,16 +90388,16 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
+; SI-NEXT: v_or_b32_e32 v0, v42, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -90031,16 +90405,16 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -90048,33 +90422,33 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v43, v1
+; SI-NEXT: v_or_b32_e32 v1, v53, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -90083,16 +90457,16 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -90100,16 +90474,16 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v34, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -90117,173 +90491,147 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v41, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v35, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v59, v1
+; SI-NEXT: v_or_b32_e32 v1, v33, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_or_b32_e32 v0, v42, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v45
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
-; SI-NEXT: v_or_b32_e32 v0, v32, v0
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v38, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36
-; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -90313,7 +90661,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v128i8_to_v16i64_scalar:
@@ -90335,21 +90683,21 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:332
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
@@ -90364,7 +90712,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
-; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -90373,76 +90721,80 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v23
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3
-; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5
-; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9
-; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11
-; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v25
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v27
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v3
+; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v5
+; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v29
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v45
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v44
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v43
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v42
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v41
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v40
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v55
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v54
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v53
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v52
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v51
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v50
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v49
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v48
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v39
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v30
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v31
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v32
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v33
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v34
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v35
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v36
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v37
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:184
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
@@ -90450,30 +90802,30 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37
-; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v38
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v15
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v1
; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v13
; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v9
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
@@ -90482,130 +90834,127 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v12, 8, v15
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1
+; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:312
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
-; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
-; VI-NEXT: s_waitcnt vmcnt(11)
-; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:292
+; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v11
+; VI-NEXT: s_waitcnt vmcnt(10)
+; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52
-; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116
-; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:188
; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:276
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308
-; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:116
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(12)
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v4, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -90614,208 +90963,207 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v3, v7
+; VI-NEXT: v_or_b32_sdwa v3, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v3, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v29, v9
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v50, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v59, v0
-; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v56, v0
-; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v61, v0
+; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v37, v1
+; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v38, v1
-; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v63, v1
+; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v59, v45
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v36, v0
-; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v35, v1
-; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v1
+; VI-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v33, v0
-; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v58, v0
+; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v51, v3
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v42, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v22, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v62, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v23, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v34, v26
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v49, v1
-; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v33, v1
+; VI-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v54, v0
-; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v46, v61
+; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v45, v32
; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v54, v0
+; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v57, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v47, v45
+; VI-NEXT: v_or_b32_sdwa v0, v41, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v43, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v48, v0
-; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v39, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v56, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v55, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v56, v60
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v1, v53, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v49, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v48, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v57, v0
-; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v48, v2
+; VI-NEXT: v_mov_b32_e32 v53, v55
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: v_mov_b32_e32 v43, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v42, v0
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -90846,52 +91194,49 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB59_3
; VI-NEXT: .LBB59_2:
-; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_mov_b32_e32 v46, v61
-; VI-NEXT: v_mov_b32_e32 v47, v45
-; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v34, v26
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: v_mov_b32_e32 v51, v7
-; VI-NEXT: v_mov_b32_e32 v48, v29
-; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
+; VI-NEXT: v_mov_b32_e32 v59, v45
+; VI-NEXT: v_mov_b32_e32 v45, v32
+; VI-NEXT: v_mov_b32_e32 v56, v60
; VI-NEXT: .LBB59_3: ; %Flow
; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v44, v47
-; VI-NEXT: v_mov_b32_e32 v47, v46
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_mov_b32_e32 v46, v49
+; VI-NEXT: v_mov_b32_e32 v32, v59
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_mov_b32_e32 v59, v33
; VI-NEXT: s_cbranch_vccnz .LBB59_5
; VI-NEXT: ; %bb.4: ; %cmp.true
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s5, s4
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_and_b32 s4, s4, 0xffff
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: s_add_i32 s16, s16, 3
@@ -90937,17 +91282,17 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
@@ -90960,327 +91305,332 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v46
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v45
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v57
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63
-; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v40
+; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v55
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v56
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
+; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: .LBB59_5: ; %end
; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -91320,28 +91670,37 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:64
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -91351,270 +91710,294 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX9-NEXT: s_waitcnt vmcnt(35)
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_waitcnt vmcnt(28)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v21
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v23
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v25
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v29
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v41
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v31
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v35
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v51
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248
+; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v7
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v5
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:324
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:292
+; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13
; GFX9-NEXT: s_waitcnt vmcnt(14)
-; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7
-; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX9-NEXT: s_waitcnt vmcnt(13)
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148
+; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:140
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:132
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:116
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:100
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148
-; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_waitcnt vmcnt(42)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(40)
+; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(36)
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(42)
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v38, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
-; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
@@ -91622,202 +92005,199 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_or_b32_sdwa v2, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v15, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v43, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v38
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v58, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v54, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_mov_b32_e32 v52, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v50, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v53, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v46, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v37, v57
-; GFX9-NEXT: v_mov_b32_e32 v57, v60
-; GFX9-NEXT: v_mov_b32_e32 v52, v56
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v46, v61
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_mov_b32_e32 v34, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v32, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v56, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v34, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v49, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v53, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v42, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v59, v39
+; GFX9-NEXT: v_mov_b32_e32 v39, v41
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_mov_b32_e32 v56, v55
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v61, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_or_b32 s4, s4, s5
@@ -91848,32 +92228,39 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB59_3
; GFX9-NEXT: .LBB59_2:
-; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v0
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
-; GFX9-NEXT: v_mov_b32_e32 v53, v3
-; GFX9-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-NEXT: v_mov_b32_e32 v57, v38
+; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: .LBB59_3: ; %Flow
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_mov_b32_e32 v41, v52
; GFX9-NEXT: s_cbranch_vccnz .LBB59_5
; GFX9-NEXT: ; %bb.4: ; %cmp.true
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v61
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -91917,190 +92304,210 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_and_b32 s8, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s9, s29, 8
; GFX9-NEXT: s_or_b32 s8, s9, s8
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v56
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s8, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s8, v0
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v52, v54
+; GFX9-NEXT: v_mov_b32_e32 v55, v57
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v49
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
@@ -92110,163 +92517,155 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v46
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v48
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
-; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v37
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v49
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v40
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v43
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 3, v36
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v41
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
-; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
+; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v54
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v0, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: .LBB59_5: ; %end
; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -92438,7 +92837,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -92509,24 +92908,24 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -92768,40 +93167,40 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -92810,9 +93209,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB59_3
; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -93226,7 +93624,9 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB59_2
+; GFX11-TRUE16-NEXT: s_branch .LBB59_3
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -93379,7 +93779,7 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -93450,24 +93850,24 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -93709,40 +94109,40 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -93751,9 +94151,8 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB59_3
; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -94167,7 +94566,9 @@ define inreg <16 x i64> @bitcast_v128i8_to_v16i64_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB59_2
+; GFX11-FAKE16-NEXT: s_branch .LBB59_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -95138,8 +95539,9 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: v_writelane_b32 v20, s87, 31
; SI-NEXT: v_writelane_b32 v20, s96, 32
; SI-NEXT: v_writelane_b32 v20, s97, 33
-; SI-NEXT: v_writelane_b32 v20, s98, 34
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v20, s98, 34
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v20, s99, 35
; SI-NEXT: v_readfirstlane_b32 s70, v1
; SI-NEXT: v_readfirstlane_b32 s71, v2
@@ -95158,22 +95560,22 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v15
; SI-NEXT: v_readfirstlane_b32 s7, v16
; SI-NEXT: v_readfirstlane_b32 s8, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s9, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane
; SI-NEXT: s_cbranch_scc0 .LBB61_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_and_b32 s4, s9, 0xffff0000
+; SI-NEXT: s_lshl_b32 s4, s9, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v21, s4, 0
-; SI-NEXT: s_lshl_b32 s4, s9, 16
-; SI-NEXT: v_writelane_b32 v21, s4, 1
; SI-NEXT: s_and_b32 s4, s8, 0xffff0000
-; SI-NEXT: v_writelane_b32 v21, s4, 2
+; SI-NEXT: v_writelane_b32 v21, s4, 1
; SI-NEXT: s_lshl_b32 s4, s8, 16
-; SI-NEXT: v_writelane_b32 v21, s4, 3
+; SI-NEXT: v_writelane_b32 v21, s4, 2
+; SI-NEXT: s_lshl_b32 s4, s7, 16
+; SI-NEXT: s_and_b32 s10, s9, 0xffff0000
; SI-NEXT: s_and_b32 s11, s7, 0xffff0000
-; SI-NEXT: s_lshl_b32 s10, s7, 16
+; SI-NEXT: v_writelane_b32 v21, s4, 3
; SI-NEXT: s_and_b32 s13, s6, 0xffff0000
; SI-NEXT: s_lshl_b32 s12, s6, 16
; SI-NEXT: s_and_b32 s15, s99, 0xffff0000
@@ -95267,15 +95669,15 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
; SI-NEXT: s_and_b32 s10, s9, 0xffff0000
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v21, s10, 0
; SI-NEXT: s_lshl_b32 s9, s9, 16
-; SI-NEXT: v_writelane_b32 v21, s9, 1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_writelane_b32 v21, s9, 0
; SI-NEXT: s_and_b32 s9, s8, 0xffff0000
-; SI-NEXT: v_writelane_b32 v21, s9, 2
+; SI-NEXT: v_writelane_b32 v21, s9, 1
; SI-NEXT: s_lshl_b32 s8, s8, 16
+; SI-NEXT: v_writelane_b32 v21, s8, 2
; SI-NEXT: s_and_b32 s11, s7, 0xffff0000
-; SI-NEXT: s_lshl_b32 s10, s7, 16
+; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: s_and_b32 s13, s6, 0xffff0000
; SI-NEXT: s_lshl_b32 s12, s6, 16
; SI-NEXT: s_and_b32 s15, s14, 0xffff0000
@@ -95334,7 +95736,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: s_lshl_b32 s66, s5, 16
; SI-NEXT: s_and_b32 s69, s4, 0xffff0000
; SI-NEXT: s_lshl_b32 s68, s4, 16
-; SI-NEXT: v_writelane_b32 v21, s8, 3
+; SI-NEXT: v_writelane_b32 v21, s7, 3
; SI-NEXT: .LBB61_3: ; %end
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s69
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -95539,24 +95941,24 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s11
+; SI-NEXT: v_readlane_b32 s4, v21, 3
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s10
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0
-; SI-NEXT: v_readlane_b32 s4, v21, 2
+; SI-NEXT: v_readlane_b32 s4, v21, 1
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
-; SI-NEXT: v_readlane_b32 s4, v21, 3
+; SI-NEXT: v_readlane_b32 s4, v21, 2
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0
-; SI-NEXT: v_readlane_b32 s4, v21, 0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
-; SI-NEXT: v_readlane_b32 s4, v21, 1
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s10
+; SI-NEXT: v_readlane_b32 s4, v21, 0
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -95605,8 +96007,8 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB61_4:
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
; SI-NEXT: ; implicit-def: $sgpr68
; SI-NEXT: ; implicit-def: $sgpr69
; SI-NEXT: ; implicit-def: $sgpr66
@@ -95665,20 +96067,23 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; SI-NEXT: ; implicit-def: $sgpr15
; SI-NEXT: ; implicit-def: $sgpr12
; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: ; implicit-def: $sgpr4
-; SI-NEXT: ; kill: killed $sgpr4
-; SI-NEXT: s_branch .LBB61_2
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; kill: killed $sgpr10
+; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB61_2
+; SI-NEXT: s_branch .LBB61_3
;
; VI-LABEL: bitcast_v16i64_to_v64bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -95699,7 +96104,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -95712,10 +96117,13 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB61_3
-; VI-NEXT: .LBB61_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB61_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB61_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -95748,16 +96156,15 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; VI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
-; VI-NEXT: .LBB61_3: ; %end
+; VI-NEXT: .LBB61_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v16i64_to_v64bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -95778,7 +96185,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -95791,10 +96198,13 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB61_3
-; GFX9-NEXT: .LBB61_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB61_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -95827,44 +96237,42 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
-; GFX9-NEXT: .LBB61_3: ; %end
+; GFX9-NEXT: .LBB61_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v16i64_to_v64bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB61_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB61_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_3:
-; GFX11-NEXT: .LBB61_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
@@ -95905,6 +96313,7 @@ define inreg <64 x bfloat> @bitcast_v16i64_to_v64bf16_scalar(<16 x i64> inreg %a
; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-NEXT: .LBB61_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -98894,23 +99303,28 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v7
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6
-; SI-NEXT: v_mov_b32_e32 v39, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8
; SI-NEXT: v_mov_b32_e32 v38, v12
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v38
@@ -98924,14 +99338,11 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30
; SI-NEXT: v_mov_b32_e32 v37, v14
-; SI-NEXT: v_mov_b32_e32 v14, v11
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9
-; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; SI-NEXT: v_mul_f32_e32 v14, 1.0, v11
; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13
; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37
; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17
@@ -98950,7 +99361,9 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19
+; SI-NEXT: v_mul_f32_e64 v10, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23
+; SI-NEXT: v_mul_f32_e64 v11, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27
@@ -98959,8 +99372,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43
; SI-NEXT: v_mul_f32_e32 v52, 1.0, v44
; SI-NEXT: v_mul_f32_e32 v24, 1.0, v45
@@ -98976,77 +99389,76 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63
; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17
-; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18
-; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21
-; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20
-; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e64 v33, 1.0, s21
+; SI-NEXT: v_mul_f32_e64 v35, 1.0, s20
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB63_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16
-; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v59, v2
; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36
-; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33
+; SI-NEXT: v_alignbit_b32 v2, v2, v35, 16
+; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_alignbit_b32 v1, v1, v10, 16
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_alignbit_b32 v3, v3, v11, 16
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
@@ -99087,30 +99499,35 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
; SI-NEXT: v_mov_b32_e32 v35, v7
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_mov_b32_e32 v43, v8
; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v9
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_mov_b32_e32 v60, v9
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_mov_b32_e32 v58, v10
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v56, v11
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32
; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v8
+; SI-NEXT: v_mov_b32_e32 v42, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16
-; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v11
-; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v9
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; SI-NEXT: v_alignbit_b32 v9, v9, v10, 16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v56, v11
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
@@ -99124,7 +99541,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v44, v14
+; SI-NEXT: v_mov_b32_e32 v33, v14
; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -99145,25 +99562,25 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16
; SI-NEXT: s_cbranch_execnz .LBB63_3
; SI-NEXT: .LBB63_2: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59
; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60
+; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v42
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v57
@@ -99175,28 +99592,28 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36
; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
@@ -99207,8 +99624,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16
; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
@@ -99287,22 +99704,22 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
@@ -99310,7 +99727,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43
; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16
-; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42
+; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v60
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58
@@ -99325,7 +99742,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62
@@ -99357,7 +99774,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
; SI-NEXT: v_alignbit_b32 v22, v23, v22, 16
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23
@@ -99377,12 +99794,12 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48
; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30
@@ -99411,25 +99828,24 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_4:
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v61, v53
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v59, v2
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
; SI-NEXT: v_mov_b32_e32 v45, v12
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_mov_b32_e32 v38, v39
; SI-NEXT: v_mov_b32_e32 v39, v41
@@ -99443,12 +99859,15 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mov_b32_e32 v48, v37
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB63_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB63_2
+; SI-NEXT: s_branch .LBB63_3
;
; VI-LABEL: bitcast_v64bf16_to_v16i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -99469,7 +99888,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -99482,10 +99901,13 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB63_4
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_3
-; VI-NEXT: .LBB63_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB63_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB63_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v15
; VI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; VI-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -100062,16 +100484,15 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_cndmask_b32_e32 v16, v33, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; VI-NEXT: v_alignbit_b32 v16, v16, v18, 16
-; VI-NEXT: .LBB63_3: ; %end
+; VI-NEXT: .LBB63_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB63_4:
-; VI-NEXT: s_branch .LBB63_2
;
; GFX9-LABEL: bitcast_v64bf16_to_v16i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -100092,7 +100513,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -100105,10 +100526,13 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_3
-; GFX9-NEXT: .LBB63_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB63_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v15
; GFX9-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -100718,11 +101142,9 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_and_b32_sdwa v16, v18, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v18, 16, v16
-; GFX9-NEXT: .LBB63_3: ; %end
+; GFX9-NEXT: .LBB63_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB63_4:
-; GFX9-NEXT: s_branch .LBB63_2
;
; GFX11-LABEL: bitcast_v64bf16_to_v16i64_scalar:
; GFX11: ; %bb.0:
@@ -100812,8 +101234,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB63_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
@@ -100824,8 +101246,7 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-NEXT: s_cbranch_execnz .LBB63_3
; GFX11-NEXT: .LBB63_2: ; %cmp.true
; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s4, s27, 16
@@ -101572,8 +101993,8 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
@@ -101587,7 +102008,9 @@ define inreg <16 x i64> @bitcast_v64bf16_to_v16i64_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB63_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB63_2
+; GFX11-NEXT: s_branch .LBB63_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -102594,8 +103017,9 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v63, s30, 0
; SI-NEXT: v_writelane_b32 v63, s31, 1
-; SI-NEXT: v_writelane_b32 v63, s34, 2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v63, s34, 2
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v63, s35, 3
; SI-NEXT: v_readfirstlane_b32 s46, v1
; SI-NEXT: v_readfirstlane_b32 s47, v2
@@ -102611,11 +103035,11 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s13, v12
; SI-NEXT: v_readfirstlane_b32 s10, v13
; SI-NEXT: v_readfirstlane_b32 s11, v14
-; SI-NEXT: v_readfirstlane_b32 s7, v15
-; SI-NEXT: v_readfirstlane_b32 s8, v16
+; SI-NEXT: v_readfirstlane_b32 s8, v15
+; SI-NEXT: v_readfirstlane_b32 s9, v16
; SI-NEXT: v_readfirstlane_b32 s6, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v18
+; SI-NEXT: v_readfirstlane_b32 s7, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -102633,17 +103057,17 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB65_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
; SI-NEXT: v_cvt_f32_f16_e32 v7, s6
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
; SI-NEXT: s_lshr_b32 s4, s11, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
@@ -102702,8 +103126,8 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f32_f16_e32 v61, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v14, s10
; SI-NEXT: v_cvt_f32_f16_e32 v16, s13
@@ -102791,22 +103215,22 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: s_addc_u32 s11, s11, 0
; SI-NEXT: s_lshr_b32 vcc_lo, s10, 16
; SI-NEXT: s_lshr_b32 vcc_hi, s11, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s30, s7, 16
-; SI-NEXT: s_lshr_b32 s31, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
-; SI-NEXT: s_lshr_b32 s35, s9, 16
+; SI-NEXT: s_lshr_b32 s30, s8, 16
+; SI-NEXT: s_lshr_b32 s31, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
+; SI-NEXT: s_lshr_b32 s35, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s35
; SI-NEXT: s_lshr_b32 s34, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
; SI-NEXT: v_cvt_f32_f16_e32 v7, s6
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, s34
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v14, s10
; SI-NEXT: v_cvt_f32_f16_e32 v16, s13
@@ -103122,7 +103546,6 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: .LBB65_4:
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr61
; SI-NEXT: ; implicit-def: $vgpr1
@@ -103184,14 +103607,18 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB65_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB65_2
+; SI-NEXT: s_branch .LBB65_3
;
; VI-LABEL: bitcast_v16i64_to_v64f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -103212,7 +103639,7 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -103225,10 +103652,13 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_3
-; VI-NEXT: .LBB65_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB65_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB65_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -103261,16 +103691,15 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; VI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
-; VI-NEXT: .LBB65_3: ; %end
+; VI-NEXT: .LBB65_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v16i64_to_v64f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -103291,7 +103720,7 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -103304,10 +103733,13 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_3
-; GFX9-NEXT: .LBB65_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB65_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -103340,44 +103772,42 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
-; GFX9-NEXT: .LBB65_3: ; %end
+; GFX9-NEXT: .LBB65_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v16i64_to_v64f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB65_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB65_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_3:
-; GFX11-NEXT: .LBB65_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
@@ -103418,6 +103848,7 @@ define inreg <64 x half> @bitcast_v16i64_to_v64f16_scalar(<16 x i64> inreg %a, i
; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-NEXT: .LBB65_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -104470,22 +104901,23 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v53, v26
-; SI-NEXT: v_mov_b32_e32 v45, v6
+; SI-NEXT: v_mov_b32_e32 v52, v30
+; SI-NEXT: v_mov_b32_e32 v54, v26
+; SI-NEXT: v_mov_b32_e32 v41, v6
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt expcnt(0)
@@ -104495,12 +104927,12 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68
-; SI-NEXT: v_mov_b32_e32 v54, v14
+; SI-NEXT: v_mov_b32_e32 v53, v14
; SI-NEXT: v_mov_b32_e32 v55, v12
-; SI-NEXT: v_mov_b32_e32 v41, v11
+; SI-NEXT: v_mov_b32_e32 v43, v11
; SI-NEXT: v_mov_b32_e32 v40, v10
-; SI-NEXT: v_mov_b32_e32 v44, v9
-; SI-NEXT: v_mov_b32_e32 v43, v8
+; SI-NEXT: v_mov_b32_e32 v45, v9
+; SI-NEXT: v_mov_b32_e32 v44, v8
; SI-NEXT: v_cvt_f16_f32_e32 v9, v1
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cvt_f16_f32_e32 v11, v3
@@ -104508,27 +104940,27 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v12, v5
; SI-NEXT: v_cvt_f16_f32_e32 v14, v4
; SI-NEXT: v_cvt_f16_f32_e32 v58, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v41, v41
; SI-NEXT: v_cvt_f16_f32_e32 v56, v45
; SI-NEXT: v_cvt_f16_f32_e32 v46, v44
; SI-NEXT: v_cvt_f16_f32_e32 v44, v43
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v41
-; SI-NEXT: v_cvt_f16_f32_e32 v59, v40
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v40
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v54
+; SI-NEXT: v_cvt_f16_f32_e32 v59, v55
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v53
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v41, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v43, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v40, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v40, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v54
; SI-NEXT: v_cvt_f16_f32_e32 v21, v29
; SI-NEXT: v_cvt_f16_f32_e32 v22, v28
; SI-NEXT: v_cvt_f16_f32_e32 v0, s17
@@ -104540,26 +104972,26 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v51
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v50
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v48
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v52
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v50
; SI-NEXT: v_cvt_f16_f32_e32 v24, v38
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v48
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f16_f32_e32 v25, v39
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v30
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f16_f32_e32 v26, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f16_f32_e32 v39, v6
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f16_f32_e32 v27, v42
+; SI-NEXT: v_cvt_f16_f32_e32 v27, v31
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f16_f32_e32 v38, v60
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v42
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v37, v62
; SI-NEXT: s_waitcnt vmcnt(5)
@@ -104569,70 +105001,74 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f16_f32_e32 v30, v33
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v34
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v35
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v35
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT: v_cvt_f16_f32_e32 v63, s16
-; SI-NEXT: v_cvt_f16_f32_e32 v62, s18
-; SI-NEXT: v_cvt_f16_f32_e32 v60, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v42, s22
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
-; SI-NEXT: v_cvt_f16_f32_e32 v33, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v36
+; SI-NEXT: v_cvt_f16_f32_e32 v62, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v60, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v42, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v36, s22
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB67_2
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(6)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_or_b32_e32 v3, v36, v3
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_or_b32_e32 v20, v54, v20
+; SI-NEXT: v_mov_b32_e32 v54, v21
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v52
-; SI-NEXT: v_or_b32_e32 v5, v33, v5
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
; SI-NEXT: v_or_b32_e32 v22, v51, v22
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
@@ -104654,11 +105090,9 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v27, v38, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v7, v8, v7
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v28, v37, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
@@ -104666,70 +105100,68 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v4, v35, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: v_or_b32_e32 v9, v14, v9
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v58
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v61
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v44
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v47
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_or_b32_e32 v19, v54, v19
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v29, v31, v29
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v34
-; SI-NEXT: v_or_b32_e32 v0, v63, v0
-; SI-NEXT: v_or_b32_e32 v1, v62, v1
-; SI-NEXT: v_or_b32_e32 v2, v60, v2
-; SI-NEXT: v_or_b32_e32 v3, v42, v3
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_mov_b32_e32 v63, v44
-; SI-NEXT: v_or_b32_e32 v11, v44, v11
+; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v63
+; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: v_or_b32_e32 v1, v60, v1
+; SI-NEXT: v_or_b32_e32 v2, v42, v2
+; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v5, v32, v5
+; SI-NEXT: v_or_b32_e32 v10, v41, v10
+; SI-NEXT: v_or_b32_e32 v11, v46, v11
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
+; SI-NEXT: v_or_b32_e32 v12, v61, v12
; SI-NEXT: v_mov_b32_e32 v60, v59
-; SI-NEXT: v_or_b32_e32 v12, v59, v12
+; SI-NEXT: v_or_b32_e32 v13, v59, v13
; SI-NEXT: v_mov_b32_e32 v58, v57
-; SI-NEXT: v_or_b32_e32 v13, v57, v13
; SI-NEXT: v_mov_b32_e32 v56, v47
+; SI-NEXT: v_or_b32_e32 v14, v47, v14
; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_or_b32_e32 v14, v45, v14
+; SI-NEXT: v_or_b32_e32 v15, v45, v15
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v15, v43, v15
-; SI-NEXT: v_mov_b32_e32 v42, v41
-; SI-NEXT: v_or_b32_e32 v16, v41, v16
-; SI-NEXT: v_or_b32_e32 v17, v40, v17
+; SI-NEXT: v_or_b32_e32 v16, v43, v16
+; SI-NEXT: v_mov_b32_e32 v42, v53
+; SI-NEXT: v_or_b32_e32 v17, v53, v17
+; SI-NEXT: v_or_b32_e32 v18, v40, v18
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_or_b32_e32 v18, v55, v18
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v30, v32, v30
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_or_b32_e32 v31, v36, v31
+; SI-NEXT: v_or_b32_e32 v19, v55, v19
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_or_b32_e32 v30, v33, v30
+; SI-NEXT: v_or_b32_e32 v31, v35, v31
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB67_3
; SI-NEXT: .LBB67_2:
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v63, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
; SI-NEXT: v_mov_b32_e32 v60, v59
; SI-NEXT: v_mov_b32_e32 v58, v57
; SI-NEXT: v_mov_b32_e32 v56, v47
; SI-NEXT: v_mov_b32_e32 v46, v45
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v42, v41
+; SI-NEXT: v_mov_b32_e32 v42, v53
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_mov_b32_e32 v52, v51
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_mov_b32_e32 v54, v21
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_mov_b32_e32 v50, v24
; SI-NEXT: v_mov_b32_e32 v49, v25
@@ -104737,25 +105169,29 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v39, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
-; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB67_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v34, v33
-; SI-NEXT: v_mov_b32_e32 v33, v35
-; SI-NEXT: v_mov_b32_e32 v35, v40
+; SI-NEXT: v_mov_b32_e32 v33, v63
+; SI-NEXT: v_mov_b32_e32 v52, v36
+; SI-NEXT: v_mov_b32_e32 v36, v40
; SI-NEXT: v_mov_b32_e32 v53, v42
+; SI-NEXT: v_mov_b32_e32 v55, v44
; SI-NEXT: v_mov_b32_e32 v40, v46
-; SI-NEXT: v_mov_b32_e32 v41, v56
+; SI-NEXT: v_mov_b32_e32 v57, v56
; SI-NEXT: v_mov_b32_e32 v42, v58
; SI-NEXT: v_mov_b32_e32 v43, v60
+; SI-NEXT: v_mov_b32_e32 v44, v62
+; SI-NEXT: v_mov_b32_e32 v45, v41
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_cbranch_vccnz .LBB67_5
; SI-NEXT: ; %bb.4: ; %cmp.true
@@ -104764,11 +105200,11 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v57
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -104777,10 +105213,10 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v43
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v44
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v57
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -104789,33 +105225,32 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_mov_b32_e32 v55, v44
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v52
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v34
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v52
+; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v32
; SI-NEXT: v_cvt_f32_f16_e32 v26, v49
; SI-NEXT: v_cvt_f32_f16_e32 v29, v38
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v32, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v33, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v32, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v33, v35
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32
@@ -104824,14 +105259,14 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v33, v33
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
@@ -104866,26 +105301,22 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
@@ -104930,72 +105361,70 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v63
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v41
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v17, v17
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
@@ -105003,35 +105432,38 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v54
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v54
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v23
; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
@@ -105044,7 +105476,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v28, v26
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
@@ -105059,9 +105491,9 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_or_b32_e32 v28, v30, v28
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
@@ -105069,16 +105501,14 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v34
+; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
@@ -105106,6 +105536,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -105126,7 +105557,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -105139,10 +105570,13 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB67_4
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_3
-; VI-NEXT: .LBB67_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB67_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB67_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v18, 0x200
; VI-NEXT: v_add_f16_sdwa v33, v15, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -105240,16 +105674,15 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v16, 0x200, v16
; VI-NEXT: v_or_b32_e32 v17, v17, v33
; VI-NEXT: v_or_b32_e32 v16, v16, v18
-; VI-NEXT: .LBB67_3: ; %end
+; VI-NEXT: .LBB67_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB67_4:
-; VI-NEXT: s_branch .LBB67_2
;
; GFX9-LABEL: bitcast_v64f16_to_v16i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -105270,7 +105703,7 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -105283,10 +105716,13 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_3
-; GFX9-NEXT: .LBB67_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB67_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -105320,118 +105756,113 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v32, v32, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, v17, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, v16, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB67_3: ; %end
+; GFX9-NEXT: .LBB67_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB67_4:
-; GFX9-NEXT: s_branch .LBB67_2
;
; GFX11-LABEL: bitcast_v64f16_to_v16i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB67_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB67_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB67_3
; GFX11-NEXT: .LBB67_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v30, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v33, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v176, 0x200, v176 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v177, 0x200, v177 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v178, 0x200, v178 op_sel_hi:[0,1]
@@ -105440,119 +105871,117 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v181, 0x200, v181 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v182, 0x200, v182 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v183, 0x200, v183 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v168, 0x200, v168 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v169, 0x200, v169 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v170, 0x200, v170 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v171, 0x200, v171 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v172, 0x200, v172 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v173, 0x200, v173 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v175, 0x200, v175 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v184, 0x200, v184 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v151, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v137, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v124, 0x200, s23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v112, 0x200, s22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v101, 0x200, s21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v91, 0x200, s20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v82, 0x200, s19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v74, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v67, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v61, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v56, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v52, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v49, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v47, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v149, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v135, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v122, 0x200, s23 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v110, 0x200, s22 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v99, 0x200, s21 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v89, 0x200, s20 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v80, 0x200, s19 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v72, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v65, 0x200, s17 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v59, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v54, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v50, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: .LBB67_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -105560,23 +105989,25 @@ define inreg <16 x i64> @bitcast_v64f16_to_v16i64_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB67_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB67_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB67_2
+; GFX11-NEXT: s_branch .LBB67_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -106165,6 +106596,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s47, v1
; SI-NEXT: v_readfirstlane_b32 s46, v2
; SI-NEXT: v_readfirstlane_b32 s45, v3
@@ -106182,8 +106614,8 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v15
; SI-NEXT: v_readfirstlane_b32 s8, v16
; SI-NEXT: v_readfirstlane_b32 s7, v17
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB69_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -106543,12 +106975,15 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr57
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr56
-; SI-NEXT: s_branch .LBB69_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB69_2
+; SI-NEXT: s_branch .LBB69_3
;
; VI-LABEL: bitcast_v16i64_to_v64i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -106569,7 +107004,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -106582,10 +107017,13 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB69_4
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB69_3
-; VI-NEXT: .LBB69_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB69_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB69_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -106618,16 +107056,15 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
-; VI-NEXT: .LBB69_3: ; %end
+; VI-NEXT: .LBB69_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB69_4:
-; VI-NEXT: s_branch .LBB69_2
;
; GFX9-LABEL: bitcast_v16i64_to_v64i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -106648,7 +107085,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -106661,10 +107098,13 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB69_3
-; GFX9-NEXT: .LBB69_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB69_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB69_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -106697,44 +107137,42 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
-; GFX9-NEXT: .LBB69_3: ; %end
+; GFX9-NEXT: .LBB69_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB69_4:
-; GFX9-NEXT: s_branch .LBB69_2
;
; GFX11-LABEL: bitcast_v16i64_to_v64i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB69_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB69_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB69_3:
-; GFX11-NEXT: .LBB69_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB69_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v14, vcc_lo, v14, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v15, null, 0, v15, vcc_lo
@@ -106775,6 +107213,7 @@ define inreg <64 x i16> @bitcast_v16i64_to_v64i16_scalar(<16 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
; GFX11-NEXT: v_add_co_u32 v16, vcc_lo, v16, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
+; GFX11-NEXT: .LBB69_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -107621,43 +108060,43 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v56, v10
-; SI-NEXT: s_waitcnt expcnt(6)
-; SI-NEXT: v_mov_b32_e32 v57, v8
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v60, v8
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:68
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3
-; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v9
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17
-; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v19
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23
+; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v23
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
@@ -107665,7 +108104,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8
@@ -107674,102 +108113,103 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38
-; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v35
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32
-; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v32
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB71_4
+; SI-NEXT: s_cbranch_scc0 .LBB71_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v7, v0, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4
-; SI-NEXT: v_or_b32_e32 v9, v0, v50
+; SI-NEXT: v_or_b32_e32 v9, v0, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; SI-NEXT: v_or_b32_e32 v10, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_or_b32_e32 v10, v0, v50
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_or_b32_e32 v11, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
; SI-NEXT: v_or_b32_e32 v12, v0, v40
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
; SI-NEXT: v_or_b32_e32 v13, v0, v13
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v41, v14
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v60, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v43, v48
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
; SI-NEXT: v_mov_b32_e32 v48, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_or_b32_e32 v16, v0, v37
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_or_b32_e32 v16, v0, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20
; SI-NEXT: v_or_b32_e32 v17, v0, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v22
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_or_b32_e32 v18, v0, v35
+; SI-NEXT: v_or_b32_e32 v18, v0, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24
; SI-NEXT: v_or_b32_e32 v19, v0, v19
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26
-; SI-NEXT: v_mov_b32_e32 v37, v20
+; SI-NEXT: v_mov_b32_e32 v38, v20
; SI-NEXT: v_or_b32_e32 v20, v0, v33
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28
; SI-NEXT: v_or_b32_e32 v21, v0, v21
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30
; SI-NEXT: v_or_b32_e32 v22, v0, v31
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v39, v23
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v56, v23
; SI-NEXT: v_or_b32_e32 v23, v0, v23
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
; SI-NEXT: v_mov_b32_e32 v24, v29
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v24, v0, v24
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_or_b32_e32 v25, v0, v25
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
; SI-NEXT: v_mov_b32_e32 v26, v27
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
@@ -107786,29 +108226,28 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_lshl_b32 s9, s25, 16
; SI-NEXT: v_mov_b32_e32 v33, v28
; SI-NEXT: v_or_b32_e32 v28, v0, v5
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: v_or_b32_e32 v29, v0, v62
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_or_b32_e32 v29, v0, v63
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v30, v0, v3
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41
; SI-NEXT: s_or_b32 s10, s10, s11
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v32, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v34, v55
; SI-NEXT: v_or_b32_e32 v8, v1, v55
; SI-NEXT: v_mov_b32_e32 v55, v4
; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v47, v46
; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: v_or_b32_e32 v31, v0, v34
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_or_b32_e32 v31, v0, v62
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -107816,12 +108255,45 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: v_mov_b32_e32 v6, s10
-; SI-NEXT: s_cbranch_execnz .LBB71_3
-; SI-NEXT: .LBB71_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_branch .LBB71_3
+; SI-NEXT: .LBB71_2:
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v34, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v55, v4
+; SI-NEXT: v_mov_b32_e32 v53, v6
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v15
+; SI-NEXT: v_mov_b32_e32 v60, v14
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_mov_b32_e32 v45, v44
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v38, v20
+; SI-NEXT: v_mov_b32_e32 v56, v23
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v33, v28
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_mov_b32_e32 v46, v25
+; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; SI-NEXT: .LBB71_3: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v41, v42
+; SI-NEXT: s_cbranch_vccnz .LBB71_5
+; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT: v_or_b32_e32 v1, v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v34, v1
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -107867,7 +108339,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v38, v0
+; SI-NEXT: v_or_b32_e32 v0, v35, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -107875,25 +108347,25 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v36, v0
+; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v43, v0
; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v49, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v43, v0
+; SI-NEXT: v_or_b32_e32 v0, v49, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0
@@ -107904,7 +108376,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -107919,7 +108391,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -107947,31 +108419,31 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_or_b32_e32 v0, v56, v0
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -107985,7 +108457,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -107994,7 +108466,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -108009,7 +108481,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: .LBB71_3: ; %end
+; SI-NEXT: .LBB71_5: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
@@ -108028,40 +108500,12 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB71_4:
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v32, v55
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v55, v4
-; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
-; SI-NEXT: v_mov_b32_e32 v43, v48
-; SI-NEXT: v_mov_b32_e32 v48, v15
-; SI-NEXT: v_mov_b32_e32 v41, v14
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v37, v20
-; SI-NEXT: v_mov_b32_e32 v39, v23
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v33, v28
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB71_2
;
; VI-LABEL: bitcast_v64i16_to_v16i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s6, v2
; VI-NEXT: v_readfirstlane_b32 s7, v3
; VI-NEXT: v_readfirstlane_b32 s8, v4
@@ -108079,12 +108523,15 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s44, v16
; VI-NEXT: v_readfirstlane_b32 s45, v17
; VI-NEXT: v_readfirstlane_b32 s46, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s47, v1
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB71_3
-; VI-NEXT: .LBB71_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB71_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB71_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s47, 3
; VI-NEXT: s_and_b32 s4, s47, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -108245,7 +108692,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s6, s4, 0x30000
-; VI-NEXT: .LBB71_3: ; %end
+; VI-NEXT: .LBB71_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -108279,13 +108726,12 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v30, s44
; VI-NEXT: v_mov_b32_e32 v31, s45
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v64i16_to_v16i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -108306,7 +108752,7 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -108319,10 +108765,13 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB71_3
-; GFX9-NEXT: .LBB71_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB71_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB71_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -108355,118 +108804,113 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB71_3: ; %end
+; GFX9-NEXT: .LBB71_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-LABEL: bitcast_v64i16_to_v16i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB71_3
; GFX11-NEXT: .LBB71_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v30, s27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v33, s27, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v17, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v176, v176, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v177, v177, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v178, v178, 3 op_sel_hi:[1,0]
@@ -108475,119 +108919,117 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v181, v181, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v182, v182, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v183, v183, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v168, v168, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v169, v169, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v170, v170, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v171, v171, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v172, v172, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v173, v173, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v175, v175, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v184, v184, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v151, s25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v137, s24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v124, s23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v112, s22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v101, s21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v91, s20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v82, s19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v74, s18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v67, s17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v61, s16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v56, s3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v52, s2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v49, s1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v47, s0, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v149, s25, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v135, s24, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v122, s23, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v110, s22, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v99, s21, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v89, s20, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v80, s19, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v72, s18, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v65, s17, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v59, s16, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v54, s3, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v50, s2, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v2, s1, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: .LBB71_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -108595,23 +109037,25 @@ define inreg <16 x i64> @bitcast_v64i16_to_v16i64_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB71_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB71_2
+; GFX11-NEXT: s_branch .LBB71_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -113025,6 +113469,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, s16
; SI-NEXT: v_mov_b32_e32 v32, s17
; SI-NEXT: v_mov_b32_e32 v29, s18
@@ -113037,9 +113482,9 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s25
; SI-NEXT: v_mov_b32_e32 v21, s26
; SI-NEXT: v_mov_b32_e32 v22, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, s28
; SI-NEXT: v_mov_b32_e32 v20, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -114062,8 +114507,6 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI-NEXT: .LBB73_4:
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; kill: killed $vgpr36
-; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr46
; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; implicit-def: $vgpr42
@@ -114221,7 +114664,11 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; kill: killed $vgpr36
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: s_branch .LBB73_2
+; SI-NEXT: ; kill: killed $vgpr36
+; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB73_2
+; SI-NEXT: s_branch .LBB73_3
;
; VI-LABEL: bitcast_v16f64_to_v128i8_scalar:
; VI: ; %bb.0:
@@ -114280,8 +114727,9 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s44, v15
; VI-NEXT: v_readfirstlane_b32 s45, v16
; VI-NEXT: v_readfirstlane_b32 s4, v17
-; VI-NEXT: s_and_b64 s[46:47], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v18
+; VI-NEXT: s_and_b64 s[46:47], vcc, exec
+; VI-NEXT: s_mov_b64 vcc, -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -114656,8 +115104,6 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: .LBB73_3:
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr70
; VI-NEXT: ; implicit-def: $sgpr71
; VI-NEXT: ; implicit-def: $sgpr68
@@ -114808,7 +115254,10 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB73_2
+; VI-NEXT: ; kill: killed $sgpr46
+; VI-NEXT: ; implicit-def: $sgpr46
+; VI-NEXT: s_andn2_b64 vcc, exec, vcc
+; VI-NEXT: s_cbranch_vccz .LBB73_2
; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v33, s71
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
@@ -115568,8 +116017,9 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_readfirstlane_b32 s44, v15
; GFX9-NEXT: v_readfirstlane_b32 s45, v16
; GFX9-NEXT: v_readfirstlane_b32 s4, v17
-; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v18
+; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
+; GFX9-NEXT: s_mov_b64 vcc, -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -115951,8 +116401,6 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: .LBB73_3:
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr80
; GFX9-NEXT: ; implicit-def: $sgpr81
; GFX9-NEXT: ; implicit-def: $sgpr70
@@ -116095,7 +116543,10 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB73_2
+; GFX9-NEXT: ; kill: killed $sgpr46
+; GFX9-NEXT: ; implicit-def: $sgpr46
+; GFX9-NEXT: s_andn2_b64 vcc, exec, vcc
+; GFX9-NEXT: s_cbranch_vccz .LBB73_2
; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v15, s81
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
@@ -116791,287 +117242,282 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_or_saveexec_b32 s4, -1
; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:76
; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:80
; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:92
; GFX11-NEXT: s_mov_b32 exec_lo, s4
-; GFX11-NEXT: v_writelane_b32 v76, s30, 0
-; GFX11-NEXT: v_writelane_b32 v77, s96, 0
+; GFX11-NEXT: v_writelane_b32 v74, s30, 0
+; GFX11-NEXT: v_writelane_b32 v75, s96, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
; GFX11-NEXT: v_readfirstlane_b32 s4, v1
; GFX11-NEXT: v_readfirstlane_b32 s5, v2
-; GFX11-NEXT: v_writelane_b32 v76, s31, 1
-; GFX11-NEXT: v_writelane_b32 v77, s97, 1
+; GFX11-NEXT: v_writelane_b32 v74, s31, 1
+; GFX11-NEXT: v_writelane_b32 v75, s97, 1
; GFX11-NEXT: v_readfirstlane_b32 s6, v3
; GFX11-NEXT: v_readfirstlane_b32 s7, v4
; GFX11-NEXT: v_readfirstlane_b32 s8, v5
-; GFX11-NEXT: v_writelane_b32 v76, s34, 2
-; GFX11-NEXT: v_writelane_b32 v77, s98, 2
+; GFX11-NEXT: v_writelane_b32 v74, s34, 2
+; GFX11-NEXT: v_writelane_b32 v75, s98, 2
; GFX11-NEXT: v_readfirstlane_b32 s9, v6
; GFX11-NEXT: v_readfirstlane_b32 s10, v7
; GFX11-NEXT: v_readfirstlane_b32 s11, v8
-; GFX11-NEXT: v_writelane_b32 v76, s35, 3
-; GFX11-NEXT: v_writelane_b32 v77, s99, 3
+; GFX11-NEXT: v_writelane_b32 v74, s35, 3
+; GFX11-NEXT: v_writelane_b32 v75, s99, 3
; GFX11-NEXT: v_readfirstlane_b32 s12, v9
; GFX11-NEXT: v_readfirstlane_b32 s13, v10
; GFX11-NEXT: v_readfirstlane_b32 s14, v11
-; GFX11-NEXT: v_writelane_b32 v76, s36, 4
-; GFX11-NEXT: v_writelane_b32 v77, s100, 4
+; GFX11-NEXT: v_writelane_b32 v74, s36, 4
+; GFX11-NEXT: v_writelane_b32 v75, s100, 4
; GFX11-NEXT: v_readfirstlane_b32 s15, v12
; GFX11-NEXT: v_readfirstlane_b32 s40, v13
; GFX11-NEXT: v_readfirstlane_b32 s41, v14
-; GFX11-NEXT: v_writelane_b32 v76, s37, 5
-; GFX11-NEXT: v_writelane_b32 v77, s101, 5
-; GFX11-NEXT: s_mov_b32 vcc_hi, 0
+; GFX11-NEXT: v_writelane_b32 v74, s37, 5
+; GFX11-NEXT: v_writelane_b32 v75, s101, 5
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
-; GFX11-NEXT: s_clause 0x13
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:40
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v75, s32
-; GFX11-NEXT: v_writelane_b32 v76, s38, 6
-; GFX11-NEXT: v_writelane_b32 v77, s102, 6
-; GFX11-NEXT: ; implicit-def: $vgpr78 : SGPR spill to VGPR lane
-; GFX11-NEXT: ; implicit-def: $vgpr79 : SGPR spill to VGPR lane
-; GFX11-NEXT: v_writelane_b32 v76, s39, 7
-; GFX11-NEXT: v_writelane_b32 v77, s103, 7
-; GFX11-NEXT: v_writelane_b32 v76, s48, 8
-; GFX11-NEXT: v_writelane_b32 v77, s104, 8
-; GFX11-NEXT: v_writelane_b32 v76, s49, 9
-; GFX11-NEXT: v_writelane_b32 v76, s50, 10
-; GFX11-NEXT: v_writelane_b32 v76, s51, 11
-; GFX11-NEXT: v_writelane_b32 v76, s52, 12
-; GFX11-NEXT: v_writelane_b32 v76, s53, 13
-; GFX11-NEXT: v_writelane_b32 v76, s54, 14
-; GFX11-NEXT: v_writelane_b32 v76, s55, 15
-; GFX11-NEXT: v_writelane_b32 v76, s64, 16
-; GFX11-NEXT: v_writelane_b32 v76, s65, 17
-; GFX11-NEXT: v_writelane_b32 v76, s66, 18
-; GFX11-NEXT: v_writelane_b32 v76, s67, 19
-; GFX11-NEXT: v_writelane_b32 v76, s68, 20
-; GFX11-NEXT: v_writelane_b32 v76, s69, 21
-; GFX11-NEXT: v_writelane_b32 v76, s70, 22
-; GFX11-NEXT: v_writelane_b32 v76, s71, 23
-; GFX11-NEXT: v_writelane_b32 v76, s80, 24
-; GFX11-NEXT: v_writelane_b32 v76, s81, 25
-; GFX11-NEXT: v_writelane_b32 v76, s82, 26
-; GFX11-NEXT: v_writelane_b32 v76, s83, 27
-; GFX11-NEXT: v_writelane_b32 v76, s84, 28
-; GFX11-NEXT: v_writelane_b32 v76, s85, 29
-; GFX11-NEXT: v_writelane_b32 v76, s86, 30
-; GFX11-NEXT: v_writelane_b32 v76, s87, 31
+; GFX11-NEXT: s_mov_b32 vcc_lo, -1
+; GFX11-NEXT: s_clause 0x11
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:32
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v73, s32
+; GFX11-NEXT: v_writelane_b32 v74, s38, 6
+; GFX11-NEXT: v_writelane_b32 v75, s102, 6
+; GFX11-NEXT: ; implicit-def: $vgpr76 : SGPR spill to VGPR lane
+; GFX11-NEXT: ; implicit-def: $vgpr77 : SGPR spill to VGPR lane
+; GFX11-NEXT: v_writelane_b32 v74, s39, 7
+; GFX11-NEXT: v_writelane_b32 v75, s103, 7
+; GFX11-NEXT: v_writelane_b32 v74, s48, 8
+; GFX11-NEXT: v_writelane_b32 v75, s104, 8
+; GFX11-NEXT: v_writelane_b32 v74, s49, 9
+; GFX11-NEXT: v_writelane_b32 v74, s50, 10
+; GFX11-NEXT: v_writelane_b32 v74, s51, 11
+; GFX11-NEXT: v_writelane_b32 v74, s52, 12
+; GFX11-NEXT: v_writelane_b32 v74, s53, 13
+; GFX11-NEXT: v_writelane_b32 v74, s54, 14
+; GFX11-NEXT: v_writelane_b32 v74, s55, 15
+; GFX11-NEXT: v_writelane_b32 v74, s64, 16
+; GFX11-NEXT: v_writelane_b32 v74, s65, 17
+; GFX11-NEXT: v_writelane_b32 v74, s66, 18
+; GFX11-NEXT: v_writelane_b32 v74, s67, 19
+; GFX11-NEXT: v_writelane_b32 v74, s68, 20
+; GFX11-NEXT: v_writelane_b32 v74, s69, 21
+; GFX11-NEXT: v_writelane_b32 v74, s70, 22
+; GFX11-NEXT: v_writelane_b32 v74, s71, 23
+; GFX11-NEXT: v_writelane_b32 v74, s80, 24
+; GFX11-NEXT: v_writelane_b32 v74, s81, 25
+; GFX11-NEXT: v_writelane_b32 v74, s82, 26
+; GFX11-NEXT: v_writelane_b32 v74, s83, 27
+; GFX11-NEXT: v_writelane_b32 v74, s84, 28
+; GFX11-NEXT: v_writelane_b32 v74, s85, 29
+; GFX11-NEXT: v_writelane_b32 v74, s86, 30
+; GFX11-NEXT: v_writelane_b32 v74, s87, 31
; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s42, s13, 16
-; GFX11-NEXT: s_lshr_b32 s50, s41, 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 0
-; GFX11-NEXT: s_lshr_b32 s42, s13, 8
-; GFX11-NEXT: s_lshr_b32 s49, s41, 16
-; GFX11-NEXT: s_lshr_b32 s48, s41, 8
-; GFX11-NEXT: s_lshr_b32 s52, s40, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 1
+; GFX11-NEXT: s_lshr_b32 s48, s41, 24
+; GFX11-NEXT: v_writelane_b32 v76, s42, 0
; GFX11-NEXT: s_lshr_b32 s42, s11, 24
-; GFX11-NEXT: s_lshr_b32 s51, s40, 8
-; GFX11-NEXT: s_lshr_b32 s39, s15, 24
-; GFX11-NEXT: s_lshr_b32 s38, s15, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 2
+; GFX11-NEXT: s_lshr_b32 s39, s41, 16
+; GFX11-NEXT: s_lshr_b32 s38, s41, 8
+; GFX11-NEXT: s_lshr_b32 s50, s40, 16
+; GFX11-NEXT: v_writelane_b32 v76, s42, 1
; GFX11-NEXT: s_lshr_b32 s42, s11, 16
-; GFX11-NEXT: s_lshr_b32 s37, s15, 8
-; GFX11-NEXT: s_lshr_b32 s54, s14, 16
-; GFX11-NEXT: s_lshr_b32 s53, s14, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 3
+; GFX11-NEXT: s_lshr_b32 s49, s40, 8
+; GFX11-NEXT: s_lshr_b32 s37, s15, 24
+; GFX11-NEXT: s_lshr_b32 s36, s15, 16
+; GFX11-NEXT: v_writelane_b32 v76, s42, 2
; GFX11-NEXT: s_lshr_b32 s42, s11, 8
-; GFX11-NEXT: s_lshr_b32 s36, s13, 24
-; GFX11-NEXT: s_lshr_b32 s64, s12, 16
-; GFX11-NEXT: s_lshr_b32 s55, s12, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 4
+; GFX11-NEXT: s_lshr_b32 s35, s15, 8
+; GFX11-NEXT: s_lshr_b32 s52, s14, 16
+; GFX11-NEXT: s_lshr_b32 s51, s14, 8
+; GFX11-NEXT: v_writelane_b32 v76, s42, 3
; GFX11-NEXT: s_lshr_b32 s42, s9, 24
-; GFX11-NEXT: s_lshr_b32 s66, s10, 16
-; GFX11-NEXT: s_lshr_b32 s65, s10, 8
-; GFX11-NEXT: s_lshr_b32 s68, s8, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 5
+; GFX11-NEXT: s_lshr_b32 s34, s13, 24
+; GFX11-NEXT: s_lshr_b32 s104, s13, 8
+; GFX11-NEXT: s_lshr_b32 s54, s12, 16
+; GFX11-NEXT: v_writelane_b32 v76, s42, 4
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
-; GFX11-NEXT: s_lshr_b32 s67, s8, 8
-; GFX11-NEXT: s_lshr_b32 s70, s6, 16
-; GFX11-NEXT: s_lshr_b32 s69, s6, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 6
+; GFX11-NEXT: s_lshr_b32 s53, s12, 8
+; GFX11-NEXT: s_lshr_b32 s64, s10, 16
+; GFX11-NEXT: s_lshr_b32 s55, s10, 8
+; GFX11-NEXT: v_writelane_b32 v76, s42, 5
; GFX11-NEXT: s_lshr_b32 s42, s9, 8
-; GFX11-NEXT: s_lshr_b32 s80, s4, 16
-; GFX11-NEXT: s_lshr_b32 s71, s4, 8
-; GFX11-NEXT: s_lshr_b32 s82, s28, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 7
+; GFX11-NEXT: s_lshr_b32 s66, s8, 16
+; GFX11-NEXT: s_lshr_b32 s65, s8, 8
+; GFX11-NEXT: s_lshr_b32 s68, s6, 16
+; GFX11-NEXT: v_writelane_b32 v76, s42, 6
; GFX11-NEXT: s_lshr_b32 s42, s7, 24
-; GFX11-NEXT: s_lshr_b32 s81, s28, 8
-; GFX11-NEXT: s_lshr_b32 s84, s26, 16
-; GFX11-NEXT: s_lshr_b32 s83, s26, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 8
+; GFX11-NEXT: s_lshr_b32 s67, s6, 8
+; GFX11-NEXT: s_lshr_b32 s70, s4, 16
+; GFX11-NEXT: s_lshr_b32 s69, s4, 8
+; GFX11-NEXT: v_writelane_b32 v76, s42, 7
; GFX11-NEXT: s_lshr_b32 s42, s7, 16
-; GFX11-NEXT: s_lshr_b32 s86, s24, 16
-; GFX11-NEXT: s_lshr_b32 s85, s24, 8
-; GFX11-NEXT: s_lshr_b32 s96, s22, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 9
+; GFX11-NEXT: s_lshr_b32 s80, s28, 16
+; GFX11-NEXT: s_lshr_b32 s71, s28, 8
+; GFX11-NEXT: s_lshr_b32 s82, s26, 16
+; GFX11-NEXT: v_writelane_b32 v76, s42, 8
; GFX11-NEXT: s_lshr_b32 s42, s7, 8
-; GFX11-NEXT: s_lshr_b32 s87, s22, 8
-; GFX11-NEXT: s_lshr_b32 s98, s20, 16
-; GFX11-NEXT: s_lshr_b32 s97, s20, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 10
+; GFX11-NEXT: s_lshr_b32 s81, s26, 8
+; GFX11-NEXT: s_lshr_b32 s84, s24, 16
+; GFX11-NEXT: s_lshr_b32 s83, s24, 8
+; GFX11-NEXT: v_writelane_b32 v76, s42, 9
; GFX11-NEXT: s_lshr_b32 s42, s5, 24
-; GFX11-NEXT: s_lshr_b32 s100, s18, 16
-; GFX11-NEXT: s_lshr_b32 s99, s18, 8
-; GFX11-NEXT: s_lshr_b32 s102, s16, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 11
+; GFX11-NEXT: s_lshr_b32 s86, s22, 16
+; GFX11-NEXT: s_lshr_b32 s85, s22, 8
+; GFX11-NEXT: s_lshr_b32 s96, s20, 16
+; GFX11-NEXT: v_writelane_b32 v76, s42, 10
; GFX11-NEXT: s_lshr_b32 s42, s5, 16
-; GFX11-NEXT: s_lshr_b32 s101, s16, 8
-; GFX11-NEXT: s_lshr_b32 s104, s2, 16
-; GFX11-NEXT: s_lshr_b32 s103, s2, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 12
+; GFX11-NEXT: s_lshr_b32 s87, s20, 8
+; GFX11-NEXT: s_lshr_b32 s98, s18, 16
+; GFX11-NEXT: s_lshr_b32 s97, s18, 8
+; GFX11-NEXT: v_writelane_b32 v76, s42, 11
; GFX11-NEXT: s_lshr_b32 s42, s5, 8
-; GFX11-NEXT: s_lshr_b32 s35, s0, 16
-; GFX11-NEXT: s_lshr_b32 s34, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 13
+; GFX11-NEXT: s_lshr_b32 s100, s16, 16
+; GFX11-NEXT: s_lshr_b32 s99, s16, 8
+; GFX11-NEXT: s_lshr_b32 s102, s2, 16
+; GFX11-NEXT: v_writelane_b32 v76, s42, 12
; GFX11-NEXT: s_lshr_b32 s42, s29, 24
+; GFX11-NEXT: s_lshr_b32 s101, s2, 8
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s0, 16
+; GFX11-NEXT: s_lshr_b32 s103, s0, 8
+; GFX11-NEXT: v_writelane_b32 v76, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s29, 16
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[40:41], 24
; GFX11-NEXT: s_lshr_b64 s[72:73], s[14:15], 24
+; GFX11-NEXT: v_writelane_b32 v76, s42, 14
+; GFX11-NEXT: s_lshr_b32 s42, s29, 8
; GFX11-NEXT: s_lshr_b64 s[74:75], s[12:13], 24
; GFX11-NEXT: s_lshr_b64 s[76:77], s[10:11], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 14
-; GFX11-NEXT: s_lshr_b32 s42, s29, 16
; GFX11-NEXT: s_lshr_b64 s[78:79], s[8:9], 24
+; GFX11-NEXT: v_writelane_b32 v76, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b64 s[88:89], s[6:7], 24
; GFX11-NEXT: s_lshr_b64 s[90:91], s[4:5], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 15
-; GFX11-NEXT: s_lshr_b32 s42, s29, 8
; GFX11-NEXT: s_lshr_b64 s[92:93], s[28:29], 24
+; GFX11-NEXT: v_writelane_b32 v76, s42, 16
+; GFX11-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-NEXT: s_lshr_b64 s[94:95], s[26:27], 24
; GFX11-NEXT: s_lshr_b64 s[30:31], s[24:25], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 16
-; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b64 s[60:61], s[22:23], 24
+; GFX11-NEXT: v_writelane_b32 v76, s42, 17
+; GFX11-NEXT: s_lshr_b32 s42, s27, 8
; GFX11-NEXT: s_lshr_b64 s[58:59], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[56:57], s[18:19], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 17
-; GFX11-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-NEXT: s_lshr_b64 s[46:47], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[44:45], s[2:3], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 18
-; GFX11-NEXT: s_lshr_b32 s42, s27, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 19
+; GFX11-NEXT: v_writelane_b32 v76, s42, 18
; GFX11-NEXT: s_lshr_b32 s42, s25, 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 20
+; GFX11-NEXT: s_lshr_b64 s[44:45], s[2:3], 24
+; GFX11-NEXT: v_writelane_b32 v76, s42, 19
; GFX11-NEXT: s_lshr_b32 s42, s25, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 21
+; GFX11-NEXT: v_writelane_b32 v76, s42, 20
; GFX11-NEXT: s_lshr_b32 s42, s25, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 22
+; GFX11-NEXT: v_writelane_b32 v76, s42, 21
; GFX11-NEXT: s_lshr_b32 s42, s23, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 23
+; GFX11-NEXT: v_writelane_b32 v76, s42, 22
; GFX11-NEXT: s_lshr_b32 s42, s23, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 24
+; GFX11-NEXT: v_writelane_b32 v76, s42, 23
; GFX11-NEXT: s_lshr_b32 s42, s23, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 25
+; GFX11-NEXT: v_writelane_b32 v76, s42, 24
; GFX11-NEXT: s_lshr_b32 s42, s21, 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 26
+; GFX11-NEXT: v_writelane_b32 v76, s42, 25
; GFX11-NEXT: s_lshr_b32 s42, s21, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 27
+; GFX11-NEXT: v_writelane_b32 v76, s42, 26
; GFX11-NEXT: s_lshr_b32 s42, s21, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 28
+; GFX11-NEXT: v_writelane_b32 v76, s42, 27
; GFX11-NEXT: s_lshr_b32 s42, s19, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 29
+; GFX11-NEXT: v_writelane_b32 v76, s42, 28
; GFX11-NEXT: s_lshr_b32 s42, s19, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 30
+; GFX11-NEXT: v_writelane_b32 v76, s42, 29
; GFX11-NEXT: s_lshr_b32 s42, s19, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 31
+; GFX11-NEXT: v_writelane_b32 v76, s42, 30
; GFX11-NEXT: s_lshr_b32 s42, s17, 24
-; GFX11-NEXT: v_writelane_b32 v79, s42, 0
+; GFX11-NEXT: v_writelane_b32 v76, s42, 31
; GFX11-NEXT: s_lshr_b32 s42, s17, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v79, s42, 1
+; GFX11-NEXT: v_writelane_b32 v77, s42, 0
; GFX11-NEXT: s_lshr_b32 s42, s17, 8
-; GFX11-NEXT: v_writelane_b32 v79, s42, 2
+; GFX11-NEXT: v_writelane_b32 v77, s42, 1
; GFX11-NEXT: s_lshr_b32 s42, s3, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v79, s42, 3
+; GFX11-NEXT: v_writelane_b32 v77, s42, 2
; GFX11-NEXT: s_lshr_b32 s42, s3, 16
-; GFX11-NEXT: v_writelane_b32 v79, s42, 4
+; GFX11-NEXT: v_writelane_b32 v77, s42, 3
; GFX11-NEXT: s_lshr_b32 s42, s3, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v79, s42, 5
+; GFX11-NEXT: v_writelane_b32 v77, s42, 4
; GFX11-NEXT: s_lshr_b32 s42, s1, 24
-; GFX11-NEXT: v_writelane_b32 v79, s42, 6
+; GFX11-NEXT: v_writelane_b32 v77, s42, 5
; GFX11-NEXT: s_lshr_b32 s42, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v79, s42, 7
+; GFX11-NEXT: v_writelane_b32 v77, s42, 6
; GFX11-NEXT: s_lshr_b32 s42, s1, 8
-; GFX11-NEXT: v_writelane_b32 v79, s42, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 7
; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX11-NEXT: s_cbranch_execnz .LBB73_4
; GFX11-NEXT: .LBB73_2: ; %cmp.true
-; GFX11-NEXT: v_add_f64 v[23:24], s[24:25], 1.0
-; GFX11-NEXT: v_add_f64 v[28:29], s[22:23], 1.0
-; GFX11-NEXT: v_add_f64 v[32:33], s[20:21], 1.0
-; GFX11-NEXT: v_add_f64 v[5:6], s[12:13], 1.0
-; GFX11-NEXT: v_add_f64 v[36:37], s[18:19], 1.0
-; GFX11-NEXT: v_add_f64 v[52:53], s[2:3], 1.0
+; GFX11-NEXT: v_add_f64 v[22:23], s[24:25], 1.0
+; GFX11-NEXT: v_add_f64 v[27:28], s[22:23], 1.0
+; GFX11-NEXT: v_add_f64 v[31:32], s[20:21], 1.0
+; GFX11-NEXT: v_add_f64 v[35:36], s[18:19], 1.0
+; GFX11-NEXT: v_add_f64 v[48:49], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[1:2], s[40:41], 1.0
+; GFX11-NEXT: v_add_f64 v[5:6], s[12:13], 1.0
+; GFX11-NEXT: v_add_f64 v[13:14], s[4:5], 1.0
+; GFX11-NEXT: v_add_f64 v[50:51], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[3:4], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[7:8], s[10:11], 1.0
; GFX11-NEXT: v_add_f64 v[9:10], s[8:9], 1.0
; GFX11-NEXT: v_add_f64 v[11:12], s[6:7], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[15:16], s[28:29], 1.0
-; GFX11-NEXT: v_add_f64 v[19:20], s[26:27], 1.0
-; GFX11-NEXT: v_add_f64 v[48:49], s[16:17], 1.0
-; GFX11-NEXT: v_add_f64 v[64:65], s[0:1], 1.0
-; GFX11-NEXT: v_lshrrev_b64 v[66:67], 24, v[23:24]
-; GFX11-NEXT: v_lshrrev_b64 v[67:68], 24, v[28:29]
-; GFX11-NEXT: v_lshrrev_b64 v[68:69], 24, v[32:33]
-; GFX11-NEXT: v_lshrrev_b64 v[25:26], 24, v[5:6]
-; GFX11-NEXT: v_lshrrev_b64 v[69:70], 24, v[36:37]
-; GFX11-NEXT: v_lshrrev_b64 v[80:81], 24, v[52:53]
+; GFX11-NEXT: v_add_f64 v[20:21], s[26:27], 1.0
+; GFX11-NEXT: v_add_f64 v[54:55], s[0:1], 1.0
+; GFX11-NEXT: v_lshrrev_b64 v[64:65], 24, v[22:23]
+; GFX11-NEXT: v_lshrrev_b64 v[65:66], 24, v[27:28]
+; GFX11-NEXT: v_lshrrev_b64 v[66:67], 24, v[31:32]
+; GFX11-NEXT: v_lshrrev_b64 v[67:68], 24, v[35:36]
+; GFX11-NEXT: v_lshrrev_b64 v[68:69], 24, v[48:49]
; GFX11-NEXT: v_lshrrev_b64 v[17:18], 24, v[1:2]
-; GFX11-NEXT: v_lshrrev_b64 v[21:22], 24, v[3:4]
-; GFX11-NEXT: v_lshrrev_b64 v[26:27], 24, v[7:8]
-; GFX11-NEXT: v_lshrrev_b64 v[30:31], 24, v[9:10]
-; GFX11-NEXT: v_lshrrev_b64 v[34:35], 24, v[11:12]
-; GFX11-NEXT: v_lshrrev_b64 v[38:39], 24, v[13:14]
-; GFX11-NEXT: v_lshrrev_b64 v[50:51], 24, v[15:16]
-; GFX11-NEXT: v_lshrrev_b64 v[54:55], 24, v[19:20]
-; GFX11-NEXT: v_lshrrev_b64 v[70:71], 24, v[48:49]
-; GFX11-NEXT: v_lshrrev_b64 v[81:82], 24, v[64:65]
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 24, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 8, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 8, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 24, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v4
+; GFX11-NEXT: v_lshrrev_b64 v[24:25], 24, v[5:6]
+; GFX11-NEXT: v_lshrrev_b64 v[37:38], 24, v[13:14]
+; GFX11-NEXT: v_lshrrev_b64 v[69:70], 24, v[50:51]
+; GFX11-NEXT: v_lshrrev_b64 v[18:19], 24, v[3:4]
+; GFX11-NEXT: v_lshrrev_b64 v[25:26], 24, v[7:8]
+; GFX11-NEXT: v_lshrrev_b64 v[29:30], 24, v[9:10]
+; GFX11-NEXT: v_lshrrev_b64 v[33:34], 24, v[11:12]
+; GFX11-NEXT: v_lshrrev_b64 v[38:39], 24, v[15:16]
+; GFX11-NEXT: v_lshrrev_b64 v[52:53], 24, v[20:21]
+; GFX11-NEXT: v_lshrrev_b64 v[70:71], 24, v[54:55]
+; GFX11-NEXT: v_lshrrev_b32_e32 v26, 24, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v39, 8, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v30, 8, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v80, 24, v4
+; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v4
; GFX11-NEXT: v_lshrrev_b32_e32 v83, 8, v4
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 8, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v81, 8, v3
; GFX11-NEXT: v_lshrrev_b32_e32 v85, 24, v6
; GFX11-NEXT: v_lshrrev_b32_e32 v87, 16, v6
; GFX11-NEXT: v_lshrrev_b32_e32 v96, 8, v6
@@ -117087,7 +117533,7 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v116, 8, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v102, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v103, 8, v9
-; GFX11-NEXT: v_lshrrev_b32_e32 v119, 24, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v118, 24, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v128, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v129, 8, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v113, 16, v11
@@ -117096,78 +117542,74 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v133, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v134, 8, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v117, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v118, 8, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v119, 8, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v145, 24, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v146, 16, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v147, 8, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v130, 16, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v131, 8, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v150, 24, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v151, 16, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v162, 8, v20
-; GFX11-NEXT: v_lshrrev_b32_e32 v135, 16, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v144, 8, v19
-; GFX11-NEXT: v_lshrrev_b32_e32 v164, 24, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v166, 16, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v167, 8, v24
-; GFX11-NEXT: v_lshrrev_b32_e32 v148, 16, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v149, 8, v23
-; GFX11-NEXT: v_lshrrev_b32_e32 v178, 24, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v179, 16, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v180, 8, v29
-; GFX11-NEXT: v_lshrrev_b32_e32 v160, 16, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v161, 8, v28
-; GFX11-NEXT: v_lshrrev_b32_e32 v183, 24, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v40, 16, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v42, 8, v33
-; GFX11-NEXT: v_lshrrev_b32_e32 v163, 16, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v165, 8, v32
-; GFX11-NEXT: v_lshrrev_b32_e32 v45, 24, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v47, 16, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v56, 8, v37
-; GFX11-NEXT: v_lshrrev_b32_e32 v176, 16, v36
-; GFX11-NEXT: v_lshrrev_b32_e32 v177, 8, v36
+; GFX11-NEXT: v_lshrrev_b32_e32 v150, 24, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v151, 16, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v161, 8, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v135, 16, v20
+; GFX11-NEXT: v_lshrrev_b32_e32 v144, 8, v20
+; GFX11-NEXT: v_lshrrev_b32_e32 v164, 24, v23
+; GFX11-NEXT: v_lshrrev_b32_e32 v166, 16, v23
+; GFX11-NEXT: v_lshrrev_b32_e32 v167, 8, v23
+; GFX11-NEXT: v_lshrrev_b32_e32 v148, 16, v22
+; GFX11-NEXT: v_lshrrev_b32_e32 v149, 8, v22
+; GFX11-NEXT: v_lshrrev_b32_e32 v178, 24, v28
+; GFX11-NEXT: v_lshrrev_b32_e32 v179, 16, v28
+; GFX11-NEXT: v_lshrrev_b32_e32 v180, 8, v28
+; GFX11-NEXT: v_lshrrev_b32_e32 v160, 16, v27
+; GFX11-NEXT: v_lshrrev_b32_e32 v162, 8, v27
+; GFX11-NEXT: v_lshrrev_b32_e32 v183, 24, v32
+; GFX11-NEXT: v_lshrrev_b32_e32 v40, 16, v32
+; GFX11-NEXT: v_lshrrev_b32_e32 v41, 8, v32
+; GFX11-NEXT: v_lshrrev_b32_e32 v163, 16, v31
+; GFX11-NEXT: v_lshrrev_b32_e32 v165, 8, v31
+; GFX11-NEXT: v_lshrrev_b32_e32 v45, 24, v36
+; GFX11-NEXT: v_lshrrev_b32_e32 v47, 16, v36
+; GFX11-NEXT: v_lshrrev_b32_e32 v56, 8, v36
+; GFX11-NEXT: v_lshrrev_b32_e32 v176, 16, v35
+; GFX11-NEXT: v_lshrrev_b32_e32 v177, 8, v35
; GFX11-NEXT: v_lshrrev_b32_e32 v57, 24, v49
; GFX11-NEXT: v_lshrrev_b32_e32 v58, 16, v49
; GFX11-NEXT: v_lshrrev_b32_e32 v59, 8, v49
; GFX11-NEXT: v_lshrrev_b32_e32 v181, 16, v48
; GFX11-NEXT: v_lshrrev_b32_e32 v182, 8, v48
-; GFX11-NEXT: v_lshrrev_b32_e32 v60, 24, v53
-; GFX11-NEXT: v_lshrrev_b32_e32 v61, 16, v53
-; GFX11-NEXT: v_lshrrev_b32_e32 v62, 8, v53
-; GFX11-NEXT: v_lshrrev_b32_e32 v41, 16, v52
-; GFX11-NEXT: v_lshrrev_b32_e32 v43, 8, v52
-; GFX11-NEXT: v_lshrrev_b32_e32 v63, 24, v65
-; GFX11-NEXT: v_lshrrev_b32_e32 v72, 16, v65
-; GFX11-NEXT: v_lshrrev_b32_e32 v73, 8, v65
-; GFX11-NEXT: v_lshrrev_b32_e32 v44, 16, v64
-; GFX11-NEXT: v_lshrrev_b32_e32 v46, 8, v64
+; GFX11-NEXT: v_lshrrev_b32_e32 v60, 24, v51
+; GFX11-NEXT: v_lshrrev_b32_e32 v61, 16, v51
+; GFX11-NEXT: v_lshrrev_b32_e32 v62, 8, v51
+; GFX11-NEXT: v_lshrrev_b32_e32 v42, 16, v50
+; GFX11-NEXT: v_lshrrev_b32_e32 v43, 8, v50
+; GFX11-NEXT: v_lshrrev_b32_e32 v63, 24, v55
+; GFX11-NEXT: v_lshrrev_b32_e32 v72, 16, v55
+; GFX11-NEXT: v_lshrrev_b32_e32 v73, 8, v55
+; GFX11-NEXT: v_lshrrev_b32_e32 v44, 16, v54
+; GFX11-NEXT: v_lshrrev_b32_e32 v46, 8, v54
; GFX11-NEXT: s_branch .LBB73_5
; GFX11-NEXT: .LBB73_3:
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr44
+; GFX11-NEXT: ; implicit-def: $vcc_hi
+; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr101
; GFX11-NEXT: ; implicit-def: $sgpr102
-; GFX11-NEXT: ; implicit-def: $sgpr46
+; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr99
; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr56
+; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr97
; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr58
+; GFX11-NEXT: ; implicit-def: $sgpr56
; GFX11-NEXT: ; implicit-def: $sgpr87
; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr60
+; GFX11-NEXT: ; implicit-def: $sgpr58
; GFX11-NEXT: ; implicit-def: $sgpr85
; GFX11-NEXT: ; implicit-def: $sgpr86
+; GFX11-NEXT: ; implicit-def: $sgpr60
; GFX11-NEXT: ; implicit-def: $sgpr83
; GFX11-NEXT: ; implicit-def: $sgpr84
; GFX11-NEXT: ; implicit-def: $sgpr81
@@ -117182,17 +117624,20 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $sgpr66
; GFX11-NEXT: ; implicit-def: $sgpr55
; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr36
; GFX11-NEXT: ; implicit-def: $sgpr53
; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr39
+; GFX11-NEXT: ; implicit-def: $sgpr104
+; GFX11-NEXT: ; implicit-def: $sgpr34
; GFX11-NEXT: ; implicit-def: $sgpr51
; GFX11-NEXT: ; implicit-def: $sgpr52
-; GFX11-NEXT: ; implicit-def: $sgpr48
+; GFX11-NEXT: ; implicit-def: $sgpr35
+; GFX11-NEXT: ; implicit-def: $sgpr36
+; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr49
; GFX11-NEXT: ; implicit-def: $sgpr50
+; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr39
+; GFX11-NEXT: ; implicit-def: $sgpr48
; GFX11-NEXT: ; implicit-def: $sgpr30
; GFX11-NEXT: ; implicit-def: $sgpr94
; GFX11-NEXT: ; implicit-def: $sgpr92
@@ -117281,434 +117726,431 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; kill: killed $sgpr43
-; GFX11-NEXT: s_branch .LBB73_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
+; GFX11-NEXT: s_cbranch_vccz .LBB73_2
; GFX11-NEXT: .LBB73_4:
-; GFX11-NEXT: v_dual_mov_b32 v64, s0 :: v_dual_mov_b32 v65, s1
-; GFX11-NEXT: v_readlane_b32 s0, v78, 0
+; GFX11-NEXT: v_dual_mov_b32 v54, s0 :: v_dual_mov_b32 v55, s1
+; GFX11-NEXT: v_readlane_b32 s0, v76, 0
; GFX11-NEXT: v_dual_mov_b32 v1, s40 :: v_dual_mov_b32 v2, s41
; GFX11-NEXT: v_dual_mov_b32 v3, s14 :: v_dual_mov_b32 v4, s15
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v87, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 1
-; GFX11-NEXT: v_mov_b32_e32 v39, s54
+; GFX11-NEXT: v_readlane_b32 s0, v76, 1
; GFX11-NEXT: v_dual_mov_b32 v5, s12 :: v_dual_mov_b32 v6, s13
; GFX11-NEXT: v_dual_mov_b32 v7, s10 :: v_dual_mov_b32 v8, s11
-; GFX11-NEXT: v_mov_b32_e32 v96, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 2
+; GFX11-NEXT: v_mov_b32_e32 v99, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 2
; GFX11-NEXT: v_dual_mov_b32 v9, s8 :: v_dual_mov_b32 v10, s9
; GFX11-NEXT: v_dual_mov_b32 v11, s6 :: v_dual_mov_b32 v12, s7
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_mov_b32_e32 v99, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 3
-; GFX11-NEXT: v_mov_b32_e32 v55, s53
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v100, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 3
; GFX11-NEXT: v_dual_mov_b32 v13, s4 :: v_dual_mov_b32 v14, s5
; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: v_mov_b32_e32 v100, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 4
-; GFX11-NEXT: v_dual_mov_b32 v19, s26 :: v_dual_mov_b32 v20, s27
-; GFX11-NEXT: v_dual_mov_b32 v23, s24 :: v_dual_mov_b32 v24, s25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v101, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 5
-; GFX11-NEXT: v_dual_mov_b32 v27, s51 :: v_dual_mov_b32 v28, s22
-; GFX11-NEXT: v_dual_mov_b32 v29, s23 :: v_dual_mov_b32 v32, s20
-; GFX11-NEXT: v_dual_mov_b32 v33, s21 :: v_dual_mov_b32 v112, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 6
-; GFX11-NEXT: v_dual_mov_b32 v31, s49 :: v_dual_mov_b32 v36, s18
-; GFX11-NEXT: v_dual_mov_b32 v37, s19 :: v_dual_mov_b32 v48, s16
+; GFX11-NEXT: v_readlane_b32 s0, v76, 4
+; GFX11-NEXT: v_dual_mov_b32 v20, s26 :: v_dual_mov_b32 v21, s27
+; GFX11-NEXT: v_dual_mov_b32 v22, s24 :: v_dual_mov_b32 v23, s25
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v112, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 5
+; GFX11-NEXT: v_dual_mov_b32 v27, s22 :: v_dual_mov_b32 v28, s23
+; GFX11-NEXT: v_dual_mov_b32 v31, s20 :: v_dual_mov_b32 v32, s21
+; GFX11-NEXT: v_mov_b32_e32 v114, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 6
+; GFX11-NEXT: v_dual_mov_b32 v35, s18 :: v_dual_mov_b32 v36, s19
+; GFX11-NEXT: v_dual_mov_b32 v48, s16 :: v_dual_mov_b32 v49, s17
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v116, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 7
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v51, s3
+; GFX11-NEXT: v_dual_mov_b32 v44, vcc_hi :: v_dual_mov_b32 v43, s101
+; GFX11-NEXT: v_mov_b32_e32 v118, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 8
+; GFX11-NEXT: v_dual_mov_b32 v46, s103 :: v_dual_mov_b32 v181, s100
+; GFX11-NEXT: v_dual_mov_b32 v42, s102 :: v_dual_mov_b32 v177, s97
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v49, s17 :: v_dual_mov_b32 v114, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 7
-; GFX11-NEXT: v_dual_mov_b32 v35, s48 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v53, s3 :: v_dual_mov_b32 v44, s35
-; GFX11-NEXT: v_dual_mov_b32 v41, s104 :: v_dual_mov_b32 v116, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 8
-; GFX11-NEXT: v_dual_mov_b32 v46, s34 :: v_dual_mov_b32 v43, s103
-; GFX11-NEXT: v_dual_mov_b32 v181, s102 :: v_dual_mov_b32 v182, s101
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_mov_b32_e32 v119, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 9
-; GFX11-NEXT: v_dual_mov_b32 v51, s39 :: v_dual_mov_b32 v176, s100
-; GFX11-NEXT: v_mov_b32_e32 v177, s99
-; GFX11-NEXT: v_dual_mov_b32 v163, s98 :: v_dual_mov_b32 v160, s96
; GFX11-NEXT: v_mov_b32_e32 v128, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 10
-; GFX11-NEXT: v_dual_mov_b32 v165, s97 :: v_dual_mov_b32 v148, s86
-; GFX11-NEXT: v_dual_mov_b32 v161, s87 :: v_dual_mov_b32 v144, s83
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_readlane_b32 s0, v76, 9
+; GFX11-NEXT: v_dual_mov_b32 v182, s99 :: v_dual_mov_b32 v163, s96
+; GFX11-NEXT: v_dual_mov_b32 v176, s98 :: v_dual_mov_b32 v165, s87
; GFX11-NEXT: v_mov_b32_e32 v129, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 11
-; GFX11-NEXT: v_mov_b32_e32 v71, s38
-; GFX11-NEXT: v_dual_mov_b32 v149, s85 :: v_dual_mov_b32 v130, s82
-; GFX11-NEXT: v_dual_mov_b32 v135, s84 :: v_dual_mov_b32 v118, s71
+; GFX11-NEXT: v_readlane_b32 s0, v76, 10
+; GFX11-NEXT: v_dual_mov_b32 v160, s86 :: v_dual_mov_b32 v149, s83
+; GFX11-NEXT: v_dual_mov_b32 v162, s85 :: v_dual_mov_b32 v135, s82
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v132, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 12
-; GFX11-NEXT: v_dual_mov_b32 v131, s81 :: v_dual_mov_b32 v102, s68
-; GFX11-NEXT: v_dual_mov_b32 v117, s80 :: v_dual_mov_b32 v98, s65
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_readlane_b32 s0, v76, 11
+; GFX11-NEXT: v_dual_mov_b32 v148, s84 :: v_dual_mov_b32 v131, s71
+; GFX11-NEXT: v_dual_mov_b32 v144, s81 :: v_dual_mov_b32 v117, s70
; GFX11-NEXT: v_mov_b32_e32 v133, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 13
-; GFX11-NEXT: v_mov_b32_e32 v83, s37
-; GFX11-NEXT: v_dual_mov_b32 v113, s70 :: v_dual_mov_b32 v84, s64
-; GFX11-NEXT: v_dual_mov_b32 v115, s69 :: v_dual_mov_b32 v86, s55
+; GFX11-NEXT: v_readlane_b32 s0, v76, 12
+; GFX11-NEXT: v_dual_mov_b32 v130, s80 :: v_dual_mov_b32 v119, s69
+; GFX11-NEXT: v_dual_mov_b32 v113, s68 :: v_dual_mov_b32 v102, s66
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v134, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 14
-; GFX11-NEXT: v_dual_mov_b32 v103, s67 :: v_dual_mov_b32 v18, s52
-; GFX11-NEXT: v_dual_mov_b32 v97, s66 :: v_dual_mov_b32 v22, s50
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_readlane_b32 s0, v76, 13
+; GFX11-NEXT: v_dual_mov_b32 v115, s67 :: v_dual_mov_b32 v98, s55
+; GFX11-NEXT: v_dual_mov_b32 v103, s65 :: v_dual_mov_b32 v84, s54
; GFX11-NEXT: v_mov_b32_e32 v145, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 15
-; GFX11-NEXT: v_mov_b32_e32 v85, s36
-; GFX11-NEXT: v_dual_mov_b32 v81, s42 :: v_dual_mov_b32 v38, s90
-; GFX11-NEXT: v_dual_mov_b32 v69, s56 :: v_dual_mov_b32 v34, s88
+; GFX11-NEXT: v_readlane_b32 s0, v76, 14
+; GFX11-NEXT: v_dual_mov_b32 v97, s64 :: v_dual_mov_b32 v86, s53
+; GFX11-NEXT: v_dual_mov_b32 v53, s52 :: v_dual_mov_b32 v30, s49
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v146, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 16
-; GFX11-NEXT: v_dual_mov_b32 v67, s60 :: v_dual_mov_b32 v30, s78
-; GFX11-NEXT: v_dual_mov_b32 v26, s76 :: v_dual_mov_b32 v25, s74
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_readlane_b32 s0, v76, 15
+; GFX11-NEXT: v_dual_mov_b32 v81, s51 :: v_dual_mov_b32 v26, s48
+; GFX11-NEXT: v_dual_mov_b32 v19, s50 :: v_dual_mov_b32 v34, s39
; GFX11-NEXT: v_mov_b32_e32 v147, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 17
-; GFX11-NEXT: v_mov_b32_e32 v21, s72
-; GFX11-NEXT: v_dual_mov_b32 v17, s62 :: v_dual_mov_b32 v80, s44
-; GFX11-NEXT: v_mov_b32_e32 v70, s46
+; GFX11-NEXT: v_readlane_b32 s0, v76, 16
+; GFX11-NEXT: v_dual_mov_b32 v39, s38 :: v_dual_mov_b32 v80, s37
+; GFX11-NEXT: v_dual_mov_b32 v82, s36 :: v_dual_mov_b32 v83, s35
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v150, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 18
-; GFX11-NEXT: v_mov_b32_e32 v68, s58
-; GFX11-NEXT: v_mov_b32_e32 v66, s30
-; GFX11-NEXT: v_mov_b32_e32 v54, s94
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v50, s92 :: v_dual_mov_b32 v151, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 19
-; GFX11-NEXT: v_mov_b32_e32 v162, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 20
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_readlane_b32 s0, v76, 17
+; GFX11-NEXT: v_dual_mov_b32 v85, s34 :: v_dual_mov_b32 v96, s104
+; GFX11-NEXT: v_dual_mov_b32 v70, s42 :: v_dual_mov_b32 v69, s44
+; GFX11-NEXT: v_mov_b32_e32 v151, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 18
+; GFX11-NEXT: v_dual_mov_b32 v68, s46 :: v_dual_mov_b32 v67, s56
+; GFX11-NEXT: v_dual_mov_b32 v66, s58 :: v_dual_mov_b32 v65, s60
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v161, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 19
+; GFX11-NEXT: v_dual_mov_b32 v64, s30 :: v_dual_mov_b32 v37, s90
+; GFX11-NEXT: v_dual_mov_b32 v52, s94 :: v_dual_mov_b32 v33, s88
; GFX11-NEXT: v_mov_b32_e32 v164, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 21
+; GFX11-NEXT: v_readlane_b32 s0, v76, 20
+; GFX11-NEXT: v_dual_mov_b32 v38, s92 :: v_dual_mov_b32 v29, s78
+; GFX11-NEXT: v_dual_mov_b32 v25, s76 :: v_dual_mov_b32 v24, s74
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_mov_b32_e32 v166, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_readlane_b32 s0, v76, 21
+; GFX11-NEXT: v_dual_mov_b32 v18, s72 :: v_dual_mov_b32 v17, s62
; GFX11-NEXT: v_mov_b32_e32 v167, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 23
-; GFX11-NEXT: v_mov_b32_e32 v178, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 24
+; GFX11-NEXT: v_readlane_b32 s0, v76, 22
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v178, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 23
; GFX11-NEXT: v_mov_b32_e32 v179, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 25
-; GFX11-NEXT: v_mov_b32_e32 v180, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 26
+; GFX11-NEXT: v_readlane_b32 s0, v76, 24
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v180, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 25
; GFX11-NEXT: v_mov_b32_e32 v183, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 27
+; GFX11-NEXT: v_readlane_b32 s0, v76, 26
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v40, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 28
+; GFX11-NEXT: v_readlane_b32 s0, v76, 27
+; GFX11-NEXT: v_mov_b32_e32 v41, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 28
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
-; GFX11-NEXT: v_mov_b32_e32 v42, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 29
; GFX11-NEXT: v_mov_b32_e32 v45, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 30
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_readlane_b32 s0, v76, 29
; GFX11-NEXT: v_mov_b32_e32 v47, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 31
-; GFX11-NEXT: v_mov_b32_e32 v56, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 30
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v56, s0
+; GFX11-NEXT: v_readlane_b32 s0, v76, 31
; GFX11-NEXT: v_mov_b32_e32 v57, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 1
-; GFX11-NEXT: v_mov_b32_e32 v58, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 2
+; GFX11-NEXT: v_readlane_b32 s0, v77, 0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v58, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 1
; GFX11-NEXT: v_mov_b32_e32 v59, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 3
-; GFX11-NEXT: v_mov_b32_e32 v60, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 4
+; GFX11-NEXT: v_readlane_b32 s0, v77, 2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v60, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 3
; GFX11-NEXT: v_mov_b32_e32 v61, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 5
-; GFX11-NEXT: v_mov_b32_e32 v62, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 6
+; GFX11-NEXT: v_readlane_b32 s0, v77, 4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v62, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 5
; GFX11-NEXT: v_mov_b32_e32 v63, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 7
+; GFX11-NEXT: v_readlane_b32 s0, v77, 6
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v72, s0
-; GFX11-NEXT: v_readlane_b32 s0, v79, 8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
+; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_mov_b32_e32 v73, s0
; GFX11-NEXT: .LBB73_5: ; %end
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_lshlrev_b32_e32 v82, 8, v46
-; GFX11-NEXT: v_and_b32_e32 v64, 0xff, v64
-; GFX11-NEXT: v_lshlrev_b32_e32 v81, 8, v81
-; GFX11-NEXT: v_and_b32_e32 v65, 0xff, v65
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 8, v80
-; GFX11-NEXT: v_and_b32_e32 v52, 0xff, v52
-; GFX11-NEXT: v_or_b32_e32 v64, v64, v82
-; GFX11-NEXT: v_and_b32_e32 v82, 0xff, v44
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 8, v46
+; GFX11-NEXT: v_and_b32_e32 v54, 0xff, v54
+; GFX11-NEXT: v_lshlrev_b32_e32 v70, 8, v70
+; GFX11-NEXT: v_and_b32_e32 v55, 0xff, v55
+; GFX11-NEXT: v_lshlrev_b32_e32 v69, 8, v69
+; GFX11-NEXT: v_and_b32_e32 v50, 0xff, v50
+; GFX11-NEXT: v_or_b32_e32 v54, v54, v71
+; GFX11-NEXT: v_and_b32_e32 v71, 0xff, v44
; GFX11-NEXT: v_lshlrev_b32_e32 v44, 8, v63
; GFX11-NEXT: v_lshlrev_b32_e32 v43, 8, v43
-; GFX11-NEXT: v_and_b32_e32 v41, 0xff, v41
-; GFX11-NEXT: v_and_b32_e32 v64, 0xffff, v64
-; GFX11-NEXT: v_or_b32_e32 v81, v82, v81
-; GFX11-NEXT: v_lshlrev_b32_e32 v82, 8, v73
-; GFX11-NEXT: v_or_b32_e32 v52, v52, v43
-; GFX11-NEXT: v_or_b32_e32 v80, v41, v80
-; GFX11-NEXT: v_and_b32_e32 v53, 0xff, v53
-; GFX11-NEXT: v_lshlrev_b32_e32 v81, 16, v81
-; GFX11-NEXT: v_or_b32_e32 v65, v65, v82
-; GFX11-NEXT: v_and_b32_e32 v82, 0xff, v72
-; GFX11-NEXT: v_lshlrev_b32_e32 v41, 8, v60
-; GFX11-NEXT: v_and_b32_e32 v52, 0xffff, v52
-; GFX11-NEXT: v_or_b32_e32 v72, v64, v81
-; GFX11-NEXT: v_and_b32_e32 v64, 0xffff, v65
-; GFX11-NEXT: v_or_b32_e32 v82, v82, v44
-; GFX11-NEXT: v_lshlrev_b32_e32 v81, 8, v62
+; GFX11-NEXT: v_and_b32_e32 v42, 0xff, v42
+; GFX11-NEXT: v_and_b32_e32 v54, 0xffff, v54
+; GFX11-NEXT: v_or_b32_e32 v70, v71, v70
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 8, v73
+; GFX11-NEXT: v_or_b32_e32 v50, v50, v43
+; GFX11-NEXT: v_or_b32_e32 v42, v42, v69
+; GFX11-NEXT: v_and_b32_e32 v51, 0xff, v51
+; GFX11-NEXT: v_lshlrev_b32_e32 v70, 16, v70
+; GFX11-NEXT: v_or_b32_e32 v55, v55, v71
+; GFX11-NEXT: v_and_b32_e32 v71, 0xff, v72
+; GFX11-NEXT: v_and_b32_e32 v43, 0xff, v61
+; GFX11-NEXT: v_and_b32_e32 v50, 0xffff, v50
+; GFX11-NEXT: v_or_b32_e32 v69, v54, v70
+; GFX11-NEXT: v_and_b32_e32 v54, 0xffff, v55
+; GFX11-NEXT: v_or_b32_e32 v71, v71, v44
+; GFX11-NEXT: v_lshlrev_b32_e32 v44, 8, v60
; GFX11-NEXT: v_and_b32_e32 v48, 0xff, v48
-; GFX11-NEXT: v_lshlrev_b32_e32 v70, 8, v70
+; GFX11-NEXT: v_lshlrev_b32_e32 v182, 8, v182
+; GFX11-NEXT: v_and_b32_e32 v181, 0xff, v181
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v71
+; GFX11-NEXT: v_lshlrev_b32_e32 v71, 8, v62
+; GFX11-NEXT: v_lshlrev_b32_e32 v68, 8, v68
+; GFX11-NEXT: v_or_b32_e32 v48, v48, v182
; GFX11-NEXT: v_and_b32_e32 v49, 0xff, v49
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 16, v82
-; GFX11-NEXT: v_and_b32_e32 v82, 0xff, v61
-; GFX11-NEXT: v_or_b32_e32 v53, v53, v81
-; GFX11-NEXT: v_and_b32_e32 v81, 0xff, v181
-; GFX11-NEXT: v_and_b32_e32 v36, 0xff, v36
-; GFX11-NEXT: v_or_b32_e32 v73, v64, v65
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 16, v80
-; GFX11-NEXT: v_or_b32_e32 v65, v82, v41
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 8, v182
-; GFX11-NEXT: v_lshlrev_b32_e32 v69, 8, v69
-; GFX11-NEXT: v_and_b32_e32 v37, 0xff, v37
-; GFX11-NEXT: v_or_b32_e32 v74, v52, v64
-; GFX11-NEXT: v_and_b32_e32 v52, 0xffff, v53
-; GFX11-NEXT: v_lshlrev_b32_e32 v53, 16, v65
-; GFX11-NEXT: v_or_b32_e32 v48, v48, v80
-; GFX11-NEXT: v_or_b32_e32 v64, v81, v70
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 8, v59
-; GFX11-NEXT: v_and_b32_e32 v70, 0xff, v58
-; GFX11-NEXT: v_lshlrev_b32_e32 v80, 8, v57
-; GFX11-NEXT: v_or_b32_e32 v75, v52, v53
+; GFX11-NEXT: v_or_b32_e32 v70, v54, v55
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v42
+; GFX11-NEXT: v_or_b32_e32 v51, v51, v71
+; GFX11-NEXT: v_or_b32_e32 v55, v43, v44
; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v48
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 16, v64
-; GFX11-NEXT: v_or_b32_e32 v49, v49, v65
-; GFX11-NEXT: v_or_b32_e32 v53, v70, v80
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 8, v177
-; GFX11-NEXT: v_and_b32_e32 v65, 0xff, v176
-; GFX11-NEXT: v_or_b32_e32 v43, v48, v52
-; GFX11-NEXT: v_and_b32_e32 v48, 0xffff, v49
-; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v53
-; GFX11-NEXT: v_or_b32_e32 v36, v36, v64
-; GFX11-NEXT: v_or_b32_e32 v52, v65, v69
-; GFX11-NEXT: v_lshlrev_b32_e32 v53, 8, v56
-; GFX11-NEXT: v_and_b32_e32 v64, 0xff, v47
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 8, v45
-; GFX11-NEXT: v_or_b32_e32 v44, v48, v49
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v52
-; GFX11-NEXT: v_or_b32_e32 v37, v37, v53
-; GFX11-NEXT: v_and_b32_e32 v32, 0xff, v32
-; GFX11-NEXT: v_or_b32_e32 v49, v64, v65
-; GFX11-NEXT: v_lshlrev_b32_e32 v52, 8, v165
-; GFX11-NEXT: v_and_b32_e32 v53, 0xff, v163
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 8, v68
+; GFX11-NEXT: v_and_b32_e32 v35, 0xff, v35
+; GFX11-NEXT: v_or_b32_e32 v71, v50, v54
+; GFX11-NEXT: v_and_b32_e32 v50, 0xffff, v51
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v55
+; GFX11-NEXT: v_or_b32_e32 v54, v181, v68
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 8, v59
+; GFX11-NEXT: v_and_b32_e32 v68, 0xff, v58
+; GFX11-NEXT: v_lshlrev_b32_e32 v181, 8, v57
+; GFX11-NEXT: v_or_b32_e32 v72, v50, v51
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v54
+; GFX11-NEXT: v_or_b32_e32 v49, v49, v55
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 8, v177
+; GFX11-NEXT: v_or_b32_e32 v51, v68, v181
+; GFX11-NEXT: v_and_b32_e32 v55, 0xff, v176
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 8, v67
+; GFX11-NEXT: v_or_b32_e32 v48, v48, v50
+; GFX11-NEXT: v_and_b32_e32 v49, 0xffff, v49
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v51
+; GFX11-NEXT: v_or_b32_e32 v35, v35, v54
+; GFX11-NEXT: v_or_b32_e32 v51, v55, v67
+; GFX11-NEXT: v_and_b32_e32 v36, 0xff, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 8, v56
+; GFX11-NEXT: v_and_b32_e32 v55, 0xff, v47
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 8, v45
+; GFX11-NEXT: v_or_b32_e32 v49, v49, v50
+; GFX11-NEXT: v_lshlrev_b32_e32 v50, 16, v51
+; GFX11-NEXT: v_or_b32_e32 v36, v36, v54
+; GFX11-NEXT: v_and_b32_e32 v31, 0xff, v31
+; GFX11-NEXT: v_or_b32_e32 v51, v55, v67
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 8, v165
+; GFX11-NEXT: v_and_b32_e32 v55, 0xff, v163
+; GFX11-NEXT: v_lshlrev_b32_e32 v66, 8, v66
+; GFX11-NEXT: v_and_b32_e32 v35, 0xffff, v35
; GFX11-NEXT: v_and_b32_e32 v36, 0xffff, v36
-; GFX11-NEXT: v_and_b32_e32 v33, 0xff, v33
-; GFX11-NEXT: v_lshlrev_b32_e32 v65, 8, v42
-; GFX11-NEXT: v_and_b32_e32 v37, 0xffff, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v49
-; GFX11-NEXT: v_or_b32_e32 v32, v32, v52
-; GFX11-NEXT: v_or_b32_e32 v52, v53, v64
-; GFX11-NEXT: v_or_b32_e32 v33, v33, v65
-; GFX11-NEXT: v_or_b32_e32 v45, v36, v48
-; GFX11-NEXT: v_or_b32_e32 v46, v37, v49
-; GFX11-NEXT: v_and_b32_e32 v37, 0xff, v40
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v52
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v183
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 16, v51
+; GFX11-NEXT: v_or_b32_e32 v31, v31, v54
+; GFX11-NEXT: v_or_b32_e32 v54, v55, v66
+; GFX11-NEXT: v_and_b32_e32 v32, 0xff, v32
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 8, v41
+; GFX11-NEXT: v_or_b32_e32 v50, v35, v50
+; GFX11-NEXT: v_or_b32_e32 v51, v36, v51
+; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v54
+; GFX11-NEXT: v_and_b32_e32 v36, 0xff, v40
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 8, v183
+; GFX11-NEXT: v_and_b32_e32 v27, 0xff, v27
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 8, v162
+; GFX11-NEXT: v_and_b32_e32 v66, 0xff, v160
+; GFX11-NEXT: v_lshlrev_b32_e32 v65, 8, v65
+; GFX11-NEXT: v_or_b32_e32 v32, v32, v67
; GFX11-NEXT: v_and_b32_e32 v28, 0xff, v28
-; GFX11-NEXT: v_lshlrev_b32_e32 v49, 8, v161
-; GFX11-NEXT: v_and_b32_e32 v52, 0xff, v160
-; GFX11-NEXT: v_lshlrev_b32_e32 v53, 8, v67
-; GFX11-NEXT: v_and_b32_e32 v29, 0xff, v29
-; GFX11-NEXT: v_lshlrev_b32_e32 v64, 8, v180
-; GFX11-NEXT: v_and_b32_e32 v65, 0xff, v179
-; GFX11-NEXT: v_lshlrev_b32_e32 v67, 8, v178
-; GFX11-NEXT: v_or_b32_e32 v37, v37, v48
-; GFX11-NEXT: v_or_b32_e32 v28, v28, v49
-; GFX11-NEXT: v_or_b32_e32 v48, v52, v53
-; GFX11-NEXT: v_or_b32_e32 v29, v29, v64
-; GFX11-NEXT: v_or_b32_e32 v49, v65, v67
+; GFX11-NEXT: v_lshlrev_b32_e32 v67, 8, v180
+; GFX11-NEXT: v_and_b32_e32 v68, 0xff, v179
+; GFX11-NEXT: v_lshlrev_b32_e32 v160, 8, v178
+; GFX11-NEXT: v_or_b32_e32 v36, v36, v54
+; GFX11-NEXT: v_or_b32_e32 v27, v27, v55
+; GFX11-NEXT: v_or_b32_e32 v54, v66, v65
+; GFX11-NEXT: v_or_b32_e32 v28, v28, v67
+; GFX11-NEXT: v_or_b32_e32 v55, v68, v160
+; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 16, v54
; GFX11-NEXT: v_and_b32_e32 v32, 0xffff, v32
-; GFX11-NEXT: v_and_b32_e32 v33, 0xffff, v33
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 16, v37
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v36
; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 16, v48
-; GFX11-NEXT: v_and_b32_e32 v29, 0xffff, v29
-; GFX11-NEXT: v_lshlrev_b32_e32 v49, 16, v49
-; GFX11-NEXT: v_or_b32_e32 v67, v32, v36
-; GFX11-NEXT: v_or_b32_e32 v68, v33, v37
-; GFX11-NEXT: v_or_b32_e32 v69, v28, v48
-; GFX11-NEXT: v_and_b32_e32 v23, 0xff, v23
-; GFX11-NEXT: v_or_b32_e32 v70, v29, v49
-; GFX11-NEXT: v_lshlrev_b32_e32 v28, 8, v149
-; GFX11-NEXT: v_and_b32_e32 v29, 0xff, v148
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v66
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v24
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v167
-; GFX11-NEXT: v_and_b32_e32 v36, 0xff, v166
-; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v164
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v144
-; GFX11-NEXT: v_or_b32_e32 v23, v23, v28
-; GFX11-NEXT: v_or_b32_e32 v28, v29, v32
-; GFX11-NEXT: v_or_b32_e32 v24, v24, v33
-; GFX11-NEXT: v_or_b32_e32 v29, v36, v37
-; GFX11-NEXT: v_or_b32_e32 v19, v19, v48
-; GFX11-NEXT: v_and_b32_e32 v32, 0xff, v135
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v54
+; GFX11-NEXT: v_lshlrev_b32_e32 v55, 16, v55
+; GFX11-NEXT: s_clause 0x1
+; GFX11-NEXT: scratch_store_b128 v0, v[69:72], off
+; GFX11-NEXT: scratch_store_b128 v0, v[48:51], off offset:16
+; GFX11-NEXT: v_or_b32_e32 v50, v27, v54
; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 8, v162
-; GFX11-NEXT: v_and_b32_e32 v37, 0xff, v151
-; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v150
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 8, v144
+; GFX11-NEXT: v_or_b32_e32 v48, v31, v35
+; GFX11-NEXT: v_or_b32_e32 v49, v32, v36
+; GFX11-NEXT: v_or_b32_e32 v51, v28, v55
+; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v22
+; GFX11-NEXT: v_lshlrev_b32_e32 v27, 8, v149
+; GFX11-NEXT: v_and_b32_e32 v28, 0xff, v148
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 8, v64
+; GFX11-NEXT: v_and_b32_e32 v23, 0xff, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v167
+; GFX11-NEXT: v_and_b32_e32 v35, 0xff, v166
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 8, v164
+; GFX11-NEXT: v_or_b32_e32 v20, v20, v54
+; GFX11-NEXT: v_or_b32_e32 v22, v22, v27
+; GFX11-NEXT: v_or_b32_e32 v27, v28, v31
+; GFX11-NEXT: v_or_b32_e32 v23, v23, v32
+; GFX11-NEXT: v_or_b32_e32 v28, v35, v36
+; GFX11-NEXT: v_and_b32_e32 v31, 0xffff, v20
+; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v135
+; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v52
+; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v35, 8, v161
; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v49, 8, v131
-; GFX11-NEXT: v_and_b32_e32 v52, 0xff, v130
-; GFX11-NEXT: v_lshlrev_b32_e32 v50, 8, v50
-; GFX11-NEXT: v_or_b32_e32 v32, v32, v33
-; GFX11-NEXT: v_or_b32_e32 v20, v20, v36
-; GFX11-NEXT: v_or_b32_e32 v33, v37, v48
-; GFX11-NEXT: v_or_b32_e32 v15, v15, v49
-; GFX11-NEXT: v_or_b32_e32 v36, v52, v50
+; GFX11-NEXT: v_lshlrev_b32_e32 v54, 8, v131
+; GFX11-NEXT: v_and_b32_e32 v55, 0xff, v130
+; GFX11-NEXT: v_lshlrev_b32_e32 v38, 8, v38
+; GFX11-NEXT: v_and_b32_e32 v36, 0xff, v151
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 8, v150
+; GFX11-NEXT: v_or_b32_e32 v20, v20, v32
+; GFX11-NEXT: v_or_b32_e32 v21, v21, v35
+; GFX11-NEXT: v_or_b32_e32 v15, v15, v54
+; GFX11-NEXT: v_or_b32_e32 v35, v55, v38
+; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX11-NEXT: v_lshlrev_b32_e32 v28, 16, v28
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v32
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshlrev_b32_e32 v33, 16, v33
+; GFX11-NEXT: v_or_b32_e32 v32, v36, v52
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v20
; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v36, 16, v36
-; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshlrev_b32_e32 v29, 16, v29
-; GFX11-NEXT: v_or_b32_e32 v148, v23, v28
-; GFX11-NEXT: v_or_b32_e32 v150, v19, v32
-; GFX11-NEXT: v_or_b32_e32 v151, v20, v33
-; GFX11-NEXT: v_or_b32_e32 v130, v15, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v35, 16, v35
+; GFX11-NEXT: v_and_b32_e32 v38, 0xffff, v21
+; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v32
+; GFX11-NEXT: v_or_b32_e32 v20, v22, v27
+; GFX11-NEXT: v_or_b32_e32 v21, v23, v28
+; GFX11-NEXT: v_or_b32_e32 v22, v31, v36
+; GFX11-NEXT: v_or_b32_e32 v35, v15, v35
; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v16
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v147
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v146
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v145
+; GFX11-NEXT: v_and_b32_e32 v27, 0xff, v146
+; GFX11-NEXT: v_lshlrev_b32_e32 v28, 8, v145
; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v23, 8, v118
-; GFX11-NEXT: v_or_b32_e32 v149, v24, v29
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v117
-; GFX11-NEXT: v_lshlrev_b32_e32 v28, 8, v38
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 8, v119
+; GFX11-NEXT: v_or_b32_e32 v23, v38, v32
+; GFX11-NEXT: v_and_b32_e32 v32, 0xff, v117
+; GFX11-NEXT: v_lshlrev_b32_e32 v36, 8, v37
; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v29, 8, v134
+; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v134
; GFX11-NEXT: v_or_b32_e32 v15, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v16, v19, v20
-; GFX11-NEXT: v_or_b32_e32 v13, v13, v23
-; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v133
-; GFX11-NEXT: v_lshlrev_b32_e32 v23, 8, v132
-; GFX11-NEXT: v_or_b32_e32 v19, v24, v28
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v29
+; GFX11-NEXT: v_or_b32_e32 v16, v27, v28
+; GFX11-NEXT: v_or_b32_e32 v13, v13, v31
+; GFX11-NEXT: v_and_b32_e32 v28, 0xff, v133
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 8, v132
+; GFX11-NEXT: v_or_b32_e32 v27, v32, v36
+; GFX11-NEXT: v_or_b32_e32 v14, v14, v37
; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v24, 8, v115
-; GFX11-NEXT: v_and_b32_e32 v28, 0xff, v113
-; GFX11-NEXT: v_lshlrev_b32_e32 v29, 8, v34
-; GFX11-NEXT: v_and_b32_e32 v12, 0xff, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v129
-; GFX11-NEXT: v_and_b32_e32 v33, 0xff, v128
-; GFX11-NEXT: v_lshlrev_b32_e32 v34, 8, v119
-; GFX11-NEXT: v_or_b32_e32 v20, v20, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v32, 8, v115
+; GFX11-NEXT: v_and_b32_e32 v36, 0xff, v113
+; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v33
+; GFX11-NEXT: v_or_b32_e32 v28, v28, v31
; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; GFX11-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_or_b32_e32 v11, v11, v24
-; GFX11-NEXT: v_or_b32_e32 v23, v28, v29
-; GFX11-NEXT: v_or_b32_e32 v12, v12, v32
-; GFX11-NEXT: v_or_b32_e32 v24, v33, v34
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; GFX11-NEXT: v_and_b32_e32 v12, 0xff, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v37, 8, v129
+; GFX11-NEXT: v_and_b32_e32 v38, 0xff, v128
+; GFX11-NEXT: v_lshlrev_b32_e32 v52, 8, v118
+; GFX11-NEXT: v_or_b32_e32 v11, v11, v32
+; GFX11-NEXT: v_or_b32_e32 v31, v36, v33
+; GFX11-NEXT: v_lshlrev_b32_e32 v28, 16, v28
+; GFX11-NEXT: v_or_b32_e32 v12, v12, v37
+; GFX11-NEXT: v_or_b32_e32 v32, v38, v52
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX11-NEXT: v_lshlrev_b32_e32 v24, 16, v24
-; GFX11-NEXT: v_or_b32_e32 v131, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v132, v13, v19
-; GFX11-NEXT: v_or_b32_e32 v133, v14, v20
+; GFX11-NEXT: v_lshlrev_b32_e32 v31, 16, v31
+; GFX11-NEXT: v_or_b32_e32 v36, v15, v16
+; GFX11-NEXT: v_or_b32_e32 v37, v13, v27
+; GFX11-NEXT: v_or_b32_e32 v38, v14, v28
; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v103
; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v102
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v30
+; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v29
; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v10
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v116
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v114
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v112
-; GFX11-NEXT: v_or_b32_e32 v11, v11, v23
-; GFX11-NEXT: v_or_b32_e32 v12, v12, v24
; GFX11-NEXT: v_and_b32_e32 v7, 0xff, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v23, 8, v98
+; GFX11-NEXT: v_lshlrev_b32_e32 v29, 8, v98
+; GFX11-NEXT: v_or_b32_e32 v11, v11, v31
+; GFX11-NEXT: v_and_b32_e32 v27, 0xff, v114
+; GFX11-NEXT: v_lshlrev_b32_e32 v28, 8, v112
; GFX11-NEXT: v_or_b32_e32 v9, v9, v13
; GFX11-NEXT: v_or_b32_e32 v13, v14, v15
; GFX11-NEXT: v_or_b32_e32 v10, v10, v16
-; GFX11-NEXT: v_or_b32_e32 v14, v19, v20
+; GFX11-NEXT: v_or_b32_e32 v7, v7, v29
; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v97
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v26
-; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v8
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v101
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v25
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v24, 8, v86
-; GFX11-NEXT: v_and_b32_e32 v26, 0xff, v84
-; GFX11-NEXT: v_lshlrev_b32_e32 v25, 8, v25
-; GFX11-NEXT: v_or_b32_e32 v7, v7, v23
-; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v100
-; GFX11-NEXT: v_lshlrev_b32_e32 v23, 8, v99
+; GFX11-NEXT: v_lshlrev_b32_e32 v29, 8, v86
+; GFX11-NEXT: v_and_b32_e32 v31, 0xff, v84
+; GFX11-NEXT: v_lshlrev_b32_e32 v24, 8, v24
+; GFX11-NEXT: v_or_b32_e32 v14, v27, v28
+; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v8
+; GFX11-NEXT: v_lshlrev_b32_e32 v25, 8, v101
+; GFX11-NEXT: v_and_b32_e32 v27, 0xff, v100
+; GFX11-NEXT: v_lshlrev_b32_e32 v28, 8, v99
; GFX11-NEXT: v_or_b32_e32 v15, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v8, v8, v19
-; GFX11-NEXT: v_or_b32_e32 v5, v5, v24
-; GFX11-NEXT: v_or_b32_e32 v19, v26, v25
+; GFX11-NEXT: v_or_b32_e32 v5, v5, v29
+; GFX11-NEXT: v_or_b32_e32 v24, v31, v24
; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; GFX11-NEXT: v_or_b32_e32 v16, v20, v23
+; GFX11-NEXT: v_or_b32_e32 v8, v8, v25
+; GFX11-NEXT: v_or_b32_e32 v16, v27, v28
; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; GFX11-NEXT: v_lshlrev_b32_e32 v24, 16, v24
; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; GFX11-NEXT: v_or_b32_e32 v13, v9, v13
; GFX11-NEXT: v_or_b32_e32 v14, v10, v14
; GFX11-NEXT: v_or_b32_e32 v7, v7, v15
-; GFX11-NEXT: v_or_b32_e32 v9, v5, v19
+; GFX11-NEXT: v_or_b32_e32 v9, v5, v24
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 8, v96
; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v87
; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v85
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v39
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v21
+; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v53
+; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v18
; GFX11-NEXT: v_or_b32_e32 v8, v8, v16
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v3
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v55
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v81
; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v4
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v83
+; GFX11-NEXT: v_lshlrev_b32_e32 v25, 8, v83
; GFX11-NEXT: v_or_b32_e32 v5, v5, v6
; GFX11-NEXT: v_or_b32_e32 v6, v10, v15
-; GFX11-NEXT: v_or_b32_e32 v10, v19, v20
+; GFX11-NEXT: v_or_b32_e32 v10, v24, v18
; GFX11-NEXT: v_or_b32_e32 v3, v3, v16
-; GFX11-NEXT: v_or_b32_e32 v4, v4, v21
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v51
+; GFX11-NEXT: v_or_b32_e32 v4, v4, v25
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v80
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v10
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v71
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v27
-; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v18
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v82
+; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v30
+; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v2
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v35
-; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v31
-; GFX11-NEXT: v_lshlrev_b32_e32 v22, 8, v22
+; GFX11-NEXT: v_lshlrev_b32_e32 v24, 8, v39
+; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v34
+; GFX11-NEXT: v_lshlrev_b32_e32 v26, 8, v26
; GFX11-NEXT: v_or_b32_e32 v10, v10, v16
-; GFX11-NEXT: v_or_b32_e32 v1, v1, v19
-; GFX11-NEXT: v_or_b32_e32 v16, v18, v17
-; GFX11-NEXT: v_or_b32_e32 v2, v2, v20
-; GFX11-NEXT: v_or_b32_e32 v17, v21, v22
+; GFX11-NEXT: v_or_b32_e32 v1, v1, v18
+; GFX11-NEXT: v_or_b32_e32 v16, v19, v17
+; GFX11-NEXT: v_or_b32_e32 v2, v2, v24
+; GFX11-NEXT: v_or_b32_e32 v17, v25, v26
+; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -117716,91 +118158,87 @@ define inreg <128 x i8> @bitcast_v16f64_to_v128i8_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v10
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v1
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v2
+; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX11-NEXT: v_or_b32_e32 v12, v12, v32
; GFX11-NEXT: v_or_b32_e32 v10, v5, v6
-; GFX11-NEXT: s_clause 0x1
-; GFX11-NEXT: scratch_store_b128 v0, v[72:75], off
-; GFX11-NEXT: scratch_store_b128 v0, v[43:46], off offset:16
; GFX11-NEXT: v_or_b32_e32 v1, v3, v15
; GFX11-NEXT: v_or_b32_e32 v2, v4, v18
; GFX11-NEXT: v_or_b32_e32 v3, v19, v16
-; GFX11-NEXT: v_or_b32_e32 v4, v20, v17
+; GFX11-NEXT: v_or_b32_e32 v4, v24, v17
; GFX11-NEXT: s_clause 0x5
-; GFX11-NEXT: scratch_store_b128 v0, v[67:70], off offset:32
-; GFX11-NEXT: scratch_store_b128 v0, v[148:151], off offset:48
-; GFX11-NEXT: scratch_store_b128 v0, v[130:133], off offset:64
+; GFX11-NEXT: scratch_store_b128 v0, v[48:51], off offset:32
+; GFX11-NEXT: scratch_store_b128 v0, v[20:23], off offset:48
+; GFX11-NEXT: scratch_store_b128 v0, v[35:38], off offset:64
; GFX11-NEXT: scratch_store_b128 v0, v[11:14], off offset:80
; GFX11-NEXT: scratch_store_b128 v0, v[7:10], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
-; GFX11-NEXT: s_clause 0x13
-; GFX11-NEXT: scratch_load_b32 v75, off, s32
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:76
-; GFX11-NEXT: v_readlane_b32 s104, v77, 8
-; GFX11-NEXT: v_readlane_b32 s103, v77, 7
-; GFX11-NEXT: v_readlane_b32 s102, v77, 6
-; GFX11-NEXT: v_readlane_b32 s101, v77, 5
-; GFX11-NEXT: v_readlane_b32 s100, v77, 4
-; GFX11-NEXT: v_readlane_b32 s99, v77, 3
-; GFX11-NEXT: v_readlane_b32 s98, v77, 2
-; GFX11-NEXT: v_readlane_b32 s97, v77, 1
-; GFX11-NEXT: v_readlane_b32 s96, v77, 0
-; GFX11-NEXT: v_readlane_b32 s87, v76, 31
-; GFX11-NEXT: v_readlane_b32 s86, v76, 30
-; GFX11-NEXT: v_readlane_b32 s85, v76, 29
-; GFX11-NEXT: v_readlane_b32 s84, v76, 28
-; GFX11-NEXT: v_readlane_b32 s83, v76, 27
-; GFX11-NEXT: v_readlane_b32 s82, v76, 26
-; GFX11-NEXT: v_readlane_b32 s81, v76, 25
-; GFX11-NEXT: v_readlane_b32 s80, v76, 24
-; GFX11-NEXT: v_readlane_b32 s71, v76, 23
-; GFX11-NEXT: v_readlane_b32 s70, v76, 22
-; GFX11-NEXT: v_readlane_b32 s69, v76, 21
-; GFX11-NEXT: v_readlane_b32 s68, v76, 20
-; GFX11-NEXT: v_readlane_b32 s67, v76, 19
-; GFX11-NEXT: v_readlane_b32 s66, v76, 18
-; GFX11-NEXT: v_readlane_b32 s65, v76, 17
-; GFX11-NEXT: v_readlane_b32 s64, v76, 16
-; GFX11-NEXT: v_readlane_b32 s55, v76, 15
-; GFX11-NEXT: v_readlane_b32 s54, v76, 14
-; GFX11-NEXT: v_readlane_b32 s53, v76, 13
-; GFX11-NEXT: v_readlane_b32 s52, v76, 12
-; GFX11-NEXT: v_readlane_b32 s51, v76, 11
-; GFX11-NEXT: v_readlane_b32 s50, v76, 10
-; GFX11-NEXT: v_readlane_b32 s49, v76, 9
-; GFX11-NEXT: v_readlane_b32 s48, v76, 8
-; GFX11-NEXT: v_readlane_b32 s39, v76, 7
-; GFX11-NEXT: v_readlane_b32 s38, v76, 6
-; GFX11-NEXT: v_readlane_b32 s37, v76, 5
-; GFX11-NEXT: v_readlane_b32 s36, v76, 4
-; GFX11-NEXT: v_readlane_b32 s35, v76, 3
-; GFX11-NEXT: v_readlane_b32 s34, v76, 2
-; GFX11-NEXT: v_readlane_b32 s31, v76, 1
-; GFX11-NEXT: v_readlane_b32 s30, v76, 0
+; GFX11-NEXT: s_clause 0x11
+; GFX11-NEXT: scratch_load_b32 v73, off, s32
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:68
+; GFX11-NEXT: v_readlane_b32 s104, v75, 8
+; GFX11-NEXT: v_readlane_b32 s103, v75, 7
+; GFX11-NEXT: v_readlane_b32 s102, v75, 6
+; GFX11-NEXT: v_readlane_b32 s101, v75, 5
+; GFX11-NEXT: v_readlane_b32 s100, v75, 4
+; GFX11-NEXT: v_readlane_b32 s99, v75, 3
+; GFX11-NEXT: v_readlane_b32 s98, v75, 2
+; GFX11-NEXT: v_readlane_b32 s97, v75, 1
+; GFX11-NEXT: v_readlane_b32 s96, v75, 0
+; GFX11-NEXT: v_readlane_b32 s87, v74, 31
+; GFX11-NEXT: v_readlane_b32 s86, v74, 30
+; GFX11-NEXT: v_readlane_b32 s85, v74, 29
+; GFX11-NEXT: v_readlane_b32 s84, v74, 28
+; GFX11-NEXT: v_readlane_b32 s83, v74, 27
+; GFX11-NEXT: v_readlane_b32 s82, v74, 26
+; GFX11-NEXT: v_readlane_b32 s81, v74, 25
+; GFX11-NEXT: v_readlane_b32 s80, v74, 24
+; GFX11-NEXT: v_readlane_b32 s71, v74, 23
+; GFX11-NEXT: v_readlane_b32 s70, v74, 22
+; GFX11-NEXT: v_readlane_b32 s69, v74, 21
+; GFX11-NEXT: v_readlane_b32 s68, v74, 20
+; GFX11-NEXT: v_readlane_b32 s67, v74, 19
+; GFX11-NEXT: v_readlane_b32 s66, v74, 18
+; GFX11-NEXT: v_readlane_b32 s65, v74, 17
+; GFX11-NEXT: v_readlane_b32 s64, v74, 16
+; GFX11-NEXT: v_readlane_b32 s55, v74, 15
+; GFX11-NEXT: v_readlane_b32 s54, v74, 14
+; GFX11-NEXT: v_readlane_b32 s53, v74, 13
+; GFX11-NEXT: v_readlane_b32 s52, v74, 12
+; GFX11-NEXT: v_readlane_b32 s51, v74, 11
+; GFX11-NEXT: v_readlane_b32 s50, v74, 10
+; GFX11-NEXT: v_readlane_b32 s49, v74, 9
+; GFX11-NEXT: v_readlane_b32 s48, v74, 8
+; GFX11-NEXT: v_readlane_b32 s39, v74, 7
+; GFX11-NEXT: v_readlane_b32 s38, v74, 6
+; GFX11-NEXT: v_readlane_b32 s37, v74, 5
+; GFX11-NEXT: v_readlane_b32 s36, v74, 4
+; GFX11-NEXT: v_readlane_b32 s35, v74, 3
+; GFX11-NEXT: v_readlane_b32 s34, v74, 2
+; GFX11-NEXT: v_readlane_b32 s31, v74, 1
+; GFX11-NEXT: v_readlane_b32 s30, v74, 0
; GFX11-NEXT: s_or_saveexec_b32 s0, -1
; GFX11-NEXT: s_clause 0x3
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:76
; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:80
; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:92
; GFX11-NEXT: s_mov_b32 exec_lo, s0
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -123872,8 +124310,17 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32
@@ -123882,15 +124329,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:40
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:80
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:88
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:72
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:80
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:88
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:112
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:136
@@ -123900,113 +124347,92 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:168
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:176
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v13
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v17
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v21
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; SI-NEXT: v_lshlrev_b32_e32 v60, 8, v3
-; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v5
-; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v3
+; SI-NEXT: v_lshlrev_b32_e32 v62, 24, v5
+; SI-NEXT: v_lshlrev_b32_e32 v28, 8, v7
+; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v9
+; SI-NEXT: v_lshlrev_b32_e32 v46, 8, v11
+; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v13
+; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v15
+; SI-NEXT: v_lshlrev_b32_e32 v12, 24, v17
+; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v19
+; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v21
+; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v23
+; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v25
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v27
+; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v25
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v29
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v45
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v44
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v42
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v40
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v54
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v53
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v51
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v49
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v48
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v39
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v33
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v35
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v37
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v38
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192
@@ -124016,31 +124442,31 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:224
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:232
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:240
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:248
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256
@@ -124052,140 +124478,157 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:304
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v11
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:312
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:324
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:308
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v9
-; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v0
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:60
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:92
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:124
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:132
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:140
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:148
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:164
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:172
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:180
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:212
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:220
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:228
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:236
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v3
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:300
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:292
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:244
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:252
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:268
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:276
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:284
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:292
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:300
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:308
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:316
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:324
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v3
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:284
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:276
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:268
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:260
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:252
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:244
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:236
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:228
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:220
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:212
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:204
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:196
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:188
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:164
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:156
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:148
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:140
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:132
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:124
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:116
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:92
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:84
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:76
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:36
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB75_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v2
-; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
-; SI-NEXT: v_or_b32_e32 v0, v0, v60
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v4
+; SI-NEXT: v_or_b32_e32 v0, v0, v16
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v30, v1
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_mov_b32_e32 v30, v5
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v4, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
+; SI-NEXT: v_or_b32_e32 v0, v0, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v6
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
+; SI-NEXT: v_or_b32_e32 v2, v2, v28
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_or_b32_e32 v3, v20, v3
+; SI-NEXT: v_or_b32_e32 v5, v2, v3
+; SI-NEXT: v_mov_b32_e32 v2, v9
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: s_or_b32 s4, s4, s5
@@ -124194,306 +124637,310 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_lshl_b32 s8, s27, 24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: v_or_b32_e32 v4, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; SI-NEXT: v_or_b32_e32 v5, v2, v3
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v10
-; SI-NEXT: v_mov_b32_e32 v3, v7
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v12
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v57, v1
; SI-NEXT: v_or_b32_e32 v6, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v14
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v16
-; SI-NEXT: v_or_b32_e32 v0, v0, v15
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v24
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v12, v1
; SI-NEXT: v_or_b32_e32 v7, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v20
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v26
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v17, v1
; SI-NEXT: v_or_b32_e32 v8, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v22
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v24
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v19
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_mov_b32_e32 v2, v9
+; SI-NEXT: v_or_b32_e32 v1, v15, v1
; SI-NEXT: v_or_b32_e32 v9, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v26
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v28
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v10, v1
+; SI-NEXT: v_or_b32_e32 v1, v27, v1
; SI-NEXT: v_or_b32_e32 v10, v0, v1
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v11
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v39
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v23
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v12, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v12, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v23
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v13
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v13, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v13, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v25
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v14, v1
-; SI-NEXT: v_or_b32_e32 v14, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v27
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v14, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v62
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v48
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v15, v1
-; SI-NEXT: v_or_b32_e32 v15, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v43
-; SI-NEXT: v_mov_b32_e32 v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v15, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v42, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v21
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v16, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v16, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v18
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v46, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v17, v1
-; SI-NEXT: v_or_b32_e32 v17, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v51
-; SI-NEXT: v_mov_b32_e32 v55, v22
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v51, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v17, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v22
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v18, v1
-; SI-NEXT: v_or_b32_e32 v18, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v50
-; SI-NEXT: v_mov_b32_e32 v44, v23
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v50, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v18, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v53, v3
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v29
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v63
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v19, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v19, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v40, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v40
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v30
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v20, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v20, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v34, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v51
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v21, v1
-; SI-NEXT: v_or_b32_e32 v21, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v59
-; SI-NEXT: v_mov_b32_e32 v59, v24
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v21, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v33
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v39
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v22, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v22, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v61
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v39, v1
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v44
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v59
+; SI-NEXT: v_or_b32_e32 v0, v0, v54
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v23, v1
+; SI-NEXT: v_or_b32_e32 v1, v39, v1
; SI-NEXT: v_or_b32_e32 v23, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v37, v56
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v33, v3
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v24, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v24, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v42
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v25, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v25, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v45
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v35, v39
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v63, v1
-; SI-NEXT: v_or_b32_e32 v26, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v26, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v63
+; SI-NEXT: v_mov_b32_e32 v41, v62
+; SI-NEXT: v_mov_b32_e32 v63, v56
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v46
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v47
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_or_b32_e32 v27, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v38
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
+; SI-NEXT: v_or_b32_e32 v27, v0, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v41
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v1, v3, v1
; SI-NEXT: v_or_b32_e32 v28, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v37
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v62, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v38, v3
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v54
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v61
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v29, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v36
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
-; SI-NEXT: v_or_b32_e32 v0, v0, v30
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v61, v54
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v30, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v34
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
-; SI-NEXT: v_or_b32_e32 v0, v0, v3
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v55
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v57, v1
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v31, v0, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v40
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: v_mov_b32_e32 v38, v63
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -124520,61 +124967,64 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: s_and_b32 s6, s6, 0xffff
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: v_mov_b32_e32 v57, v1
+; SI-NEXT: v_mov_b32_e32 v48, v1
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB75_3
; SI-NEXT: .LBB75_2:
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v61, v45
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v44, v59
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v32, v35
+; SI-NEXT: v_mov_b32_e32 v45, v41
+; SI-NEXT: v_mov_b32_e32 v43, v63
+; SI-NEXT: v_mov_b32_e32 v59, v58
+; SI-NEXT: v_mov_b32_e32 v50, v60
+; SI-NEXT: v_mov_b32_e32 v49, v61
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v37
+; SI-NEXT: v_mov_b32_e32 v51, v47
+; SI-NEXT: v_mov_b32_e32 v36, v31
+; SI-NEXT: v_mov_b32_e32 v37, v56
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_mov_b32_e32 v45, v33
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: v_mov_b32_e32 v34, v35
-; SI-NEXT: v_mov_b32_e32 v35, v36
-; SI-NEXT: v_mov_b32_e32 v36, v54
-; SI-NEXT: v_mov_b32_e32 v54, v37
-; SI-NEXT: v_mov_b32_e32 v37, v41
-; SI-NEXT: v_mov_b32_e32 v41, v38
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; SI-NEXT: .LBB75_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v63, v46
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v47, v44
; SI-NEXT: s_cbranch_vccnz .LBB75_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s28, s28, 3
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v0, s4, v0
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -124621,7 +125071,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124630,17 +125080,17 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124650,15 +125100,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v5, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124668,15 +125118,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124686,15 +125136,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124704,15 +125154,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124722,15 +125172,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124740,15 +125190,15 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124757,34 +125207,66 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v54, v1
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v62, v1
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124793,16 +125275,16 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v13, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
+; SI-NEXT: v_or_b32_e32 v0, v42, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -124810,16 +125292,16 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -124827,33 +125309,33 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v43, v1
+; SI-NEXT: v_or_b32_e32 v1, v53, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -124862,16 +125344,16 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v17, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -124879,16 +125361,16 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v18, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v34, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -124896,173 +125378,147 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v19, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v20, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v21, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_or_b32_e32 v1, v41, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v55, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v35, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v47
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_or_b32_e32 v1, v59, v1
+; SI-NEXT: v_or_b32_e32 v1, v33, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x3000000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_or_b32_e32 v0, v42, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v45
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v45
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v43
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
-; SI-NEXT: v_or_b32_e32 v0, v32, v0
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v27, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v1, v38, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v54
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36
-; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v30, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v33
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -125092,7 +125548,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
;
; VI-LABEL: bitcast_v128i8_to_v16f64_scalar:
@@ -125114,21 +125570,21 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:332
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
@@ -125143,7 +125599,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
-; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:112
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -125152,76 +125608,80 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v13
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v21
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v23
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v3
-; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v5
-; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v9
-; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v11
-; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v25
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v27
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v3
+; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v5
+; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v29
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v44
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v43
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v42
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v41
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v40
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v55
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v54
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v53
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v52
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v51
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v50
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v49
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v45
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v44
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v43
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v42
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v41
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v40
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v55
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v54
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v53
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v52
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v51
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v50
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v49
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v48
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v39
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v30
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v31
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v32
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v33
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v34
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v22
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v35
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v36
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v37
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:184
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
@@ -125229,30 +125689,30 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v37
-; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v38
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v38
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v15
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v1
; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b32_e32 v54, 8, v13
+; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v13
; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v3
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v9
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v11
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v9
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
@@ -125261,130 +125721,127 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v0
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v12, 8, v15
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v1
-; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1
+; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v13
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v11
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v3
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
+; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:312
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
-; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
-; VI-NEXT: s_waitcnt vmcnt(11)
-; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
-; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:292
+; VI-NEXT: v_lshlrev_b32_e32 v52, 8, v11
+; VI-NEXT: s_waitcnt vmcnt(10)
+; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v7, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:52
-; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:116
-; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:172
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:188
; VI-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:260
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:276
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:308
-; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:324
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v18, off, s[0:3], s32 offset:116
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v16, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(12)
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v4, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v4, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v5, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -125393,208 +125850,207 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_lshl_b32 s6, s19, 8
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v2, v2, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v3, v7
+; VI-NEXT: v_or_b32_sdwa v3, v3, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v2, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v3, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; VI-NEXT: v_mov_b32_e32 v29, v9
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v50, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v59, v0
-; VI-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v56, v0
-; VI-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v61, v0
+; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v37, v1
+; VI-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v38, v1
-; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v63, v1
+; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v59, v45
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v36, v0
-; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v35, v1
-; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v1
+; VI-NEXT: v_or_b32_sdwa v1, v18, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v33, v0
-; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v58, v0
+; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v21, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v51, v3
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v21, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v42, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v34, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v22, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v22, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v23, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v43, v54 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v62, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v23, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_or_b32_sdwa v0, v30, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v34, v26
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v49, v1
-; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v33, v1
+; VI-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v54, v0
-; VI-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v46, v61
+; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v45, v32
; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v54, v0
+; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v45, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v57, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v58, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v44, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v47, v45
+; VI-NEXT: v_or_b32_sdwa v0, v41, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v43, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v48, v0
-; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v42, v45 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v39, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v52 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v40, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v56, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v55, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v56, v60
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_or_b32_sdwa v1, v53, v55 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v49, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v48, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v57, v0
-; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v48, v2
+; VI-NEXT: v_mov_b32_e32 v53, v55
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: v_mov_b32_e32 v43, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v42, v0
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -125625,52 +126081,49 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: s_mov_b64 s[4:5], 0
; VI-NEXT: s_branch .LBB75_3
; VI-NEXT: .LBB75_2:
-; VI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v35, v24
+; VI-NEXT: v_mov_b32_e32 v62, v26
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v32, v54
-; VI-NEXT: v_mov_b32_e32 v43, v49
-; VI-NEXT: v_mov_b32_e32 v46, v61
-; VI-NEXT: v_mov_b32_e32 v47, v45
-; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v34, v26
-; VI-NEXT: v_mov_b32_e32 v58, v44
-; VI-NEXT: s_waitcnt vmcnt(14)
-; VI-NEXT: v_mov_b32_e32 v63, v42
-; VI-NEXT: v_mov_b32_e32 v51, v7
-; VI-NEXT: v_mov_b32_e32 v48, v29
-; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v50, v51
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v55, v40
+; VI-NEXT: v_mov_b32_e32 v40, v39
+; VI-NEXT: v_mov_b32_e32 v39, v43
+; VI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v46, v47
+; VI-NEXT: v_mov_b32_e32 v59, v45
+; VI-NEXT: v_mov_b32_e32 v45, v32
+; VI-NEXT: v_mov_b32_e32 v56, v60
; VI-NEXT: .LBB75_3: ; %Flow
; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; VI-NEXT: v_mov_b32_e32 v44, v47
-; VI-NEXT: v_mov_b32_e32 v47, v46
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_mov_b32_e32 v46, v49
+; VI-NEXT: v_mov_b32_e32 v32, v59
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_mov_b32_e32 v59, v33
; VI-NEXT: s_cbranch_vccnz .LBB75_5
; VI-NEXT: ; %bb.4: ; %cmp.true
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s5, s4
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v52
+; VI-NEXT: s_waitcnt vmcnt(7)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: s_addk_i32 s4, 0x300
-; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_and_b32 s4, s4, 0xffff
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: s_add_i32 s16, s16, 3
@@ -125716,17 +126169,17 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
@@ -125739,327 +126192,332 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v46
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v45
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v57
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v58
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v39
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v63
-; VI-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v40
+; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v55
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v41
-; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v40
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v56
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v62
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v55
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v53
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
+; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
+; VI-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
-; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: .LBB75_5: ; %end
; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -126099,28 +126557,37 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:32
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:40
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:48
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:56
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:64
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:72
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:80
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:88
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:96
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:104
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:112
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:8
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:16
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:24
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:32
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:40
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:48
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:56
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:64
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:72
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:80
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:88
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:96
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:104
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:112
; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:120
; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:128
; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:136
@@ -126130,270 +126597,294 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v11
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v17
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v21
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v3
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v9
-; GFX9-NEXT: s_waitcnt vmcnt(35)
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(34)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_waitcnt vmcnt(28)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v21
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v23
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v25
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v27
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v29
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v44
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v43
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v42
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v41
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v40
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v55
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v54
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v53
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(32)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v52
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v50
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v49
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v48
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v39
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v30
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v31
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v32
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v33
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v34
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v35
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v36
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v37
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(31)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v38
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:200
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:208
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:216
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:216
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v51
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v13
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v6
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v4
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248
+; GFX9-NEXT: v_lshlrev_b32_e32 v40, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v9
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v7
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v5
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:264
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:272
-; GFX9-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:280
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:280
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:288
-; GFX9-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:296
+; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:296
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:304
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v0
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v4
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v11
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
-; GFX9-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28
-; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:328
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:324
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:292
+; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v13
; GFX9-NEXT: s_waitcnt vmcnt(14)
-; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v7
-; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v31, 8, v5
+; GFX9-NEXT: s_waitcnt vmcnt(13)
+; GFX9-NEXT: v_lshlrev_b32_e32 v29, 8, v9
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_lshlrev_b32_e32 v30, 8, v7
; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v0
; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:84
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v1
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(6)
+; GFX9-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:148
+; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:140
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:132
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:116
+; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:100
; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:132
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:140
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:148
-; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:164
-; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_waitcnt vmcnt(42)
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v2
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v22, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v14, off, s[0:3], s32 offset:44
+; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
+; GFX9-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(40)
+; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(36)
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(38)
+; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(39)
+; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(41)
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(42)
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v38, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v4, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
-; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
@@ -126401,202 +126892,199 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_waitcnt vmcnt(5)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_or_b32_sdwa v2, v2, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_or_b32_sdwa v2, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v3, v3, v10 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v12, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v20, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v14, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v15, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v43, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v38
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v58, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v54, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v27, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v19, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v58, v59 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_mov_b32_e32 v52, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v21, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v50, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v1, v50, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v53, v61 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v44, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v57 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v46, v40 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v63, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v37, v57
-; GFX9-NEXT: v_mov_b32_e32 v57, v60
-; GFX9-NEXT: v_mov_b32_e32 v52, v56
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v46, v61
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_mov_b32_e32 v34, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v46, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v1, v45, v44 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v32, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v39, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v56, v41 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v34, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v49, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v31 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v55, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v53, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v30 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v45, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v42, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v59, v39
+; GFX9-NEXT: v_mov_b32_e32 v39, v41
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_mov_b32_e32 v56, v55
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v61, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_or_b32 s4, s4, s5
@@ -126627,32 +127115,39 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB75_3
; GFX9-NEXT: .LBB75_2:
-; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v61, v0
-; GFX9-NEXT: v_mov_b32_e32 v63, v57
-; GFX9-NEXT: v_mov_b32_e32 v53, v3
-; GFX9-NEXT: s_mov_b64 s[4:5], -1
-; GFX9-NEXT: v_mov_b32_e32 v57, v38
+; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: s_nop 0
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v48, v39
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v36, v49
+; GFX9-NEXT: v_mov_b32_e32 v49, v56
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v40, v25
+; GFX9-NEXT: v_mov_b32_e32 v57, v41
+; GFX9-NEXT: v_mov_b32_e32 v38, v55
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; GFX9-NEXT: .LBB75_3: ; %Flow
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: v_mov_b32_e32 v41, v52
; GFX9-NEXT: s_cbranch_vccnz .LBB75_5
; GFX9-NEXT: ; %bb.4: ; %cmp.true
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v61
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -126696,190 +127191,210 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: s_and_b32 s8, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s9, s29, 8
; GFX9-NEXT: s_or_b32 s8, s9, s8
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v56
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s8, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s8, v0
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v52, v54
+; GFX9-NEXT: v_mov_b32_e32 v55, v57
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v49
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
@@ -126889,163 +127404,155 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v17, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v41, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v63
+; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v46
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v48
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
-; GFX9-NEXT: v_or_b32_sdwa v1, v44, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v37
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v49
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v40
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v43
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 3, v36
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v41
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v38
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
-; GFX9-NEXT: v_or_b32_sdwa v1, v53, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v45
+; GFX9-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v54
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v42
+; GFX9-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v0, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: .LBB75_5: ; %end
; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -127217,7 +127724,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -127288,24 +127795,24 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -127547,40 +128054,40 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -127589,9 +128096,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB75_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB75_3
; GFX11-TRUE16-NEXT: .LBB75_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -128005,7 +128511,9 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB75_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB75_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB75_2
+; GFX11-TRUE16-NEXT: s_branch .LBB75_3
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v16f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -128158,7 +128666,7 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v73, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v74, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v75, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v57, 8, v2
@@ -128229,24 +128737,24 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v54
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v53
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v90
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v91
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s26, 0xff
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v0, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v50
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v49
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v76
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v77
@@ -128488,40 +128996,40 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v89
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s5, v0
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s4, v0
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s10, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v51
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v3, v3, v93
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v92
@@ -128530,9 +129038,8 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB75_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB75_3
; GFX11-FAKE16-NEXT: .LBB75_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -128946,7 +129453,9 @@ define inreg <16 x double> @bitcast_v128i8_to_v16f64_scalar(<128 x i8> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB75_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB75_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB75_2
+; GFX11-FAKE16-NEXT: s_branch .LBB75_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -129825,8 +130334,9 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: v_writelane_b32 v63, s87, 31
; SI-NEXT: v_writelane_b32 v63, s96, 32
; SI-NEXT: v_writelane_b32 v63, s97, 33
-; SI-NEXT: v_writelane_b32 v63, s98, 34
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v63, s98, 34
+; SI-NEXT: s_and_b64 s[46:47], vcc, exec
; SI-NEXT: v_writelane_b32 v63, s99, 35
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: v_readfirstlane_b32 s45, v2
@@ -129845,8 +130355,8 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: v_readfirstlane_b32 s4, v15
; SI-NEXT: v_readfirstlane_b32 s5, v16
; SI-NEXT: v_readfirstlane_b32 s6, v17
-; SI-NEXT: s_and_b64 s[46:47], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s7, v18
+; SI-NEXT: s_mov_b64 s[46:47], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -130026,8 +130536,8 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: s_branch .LBB77_5
; SI-NEXT: .LBB77_3:
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
+; SI-NEXT: ; implicit-def: $sgpr56
+; SI-NEXT: ; kill: killed $sgpr56
; SI-NEXT: ; implicit-def: $sgpr60
; SI-NEXT: ; implicit-def: $sgpr61
; SI-NEXT: ; implicit-def: $sgpr62
@@ -130084,17 +130594,18 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; SI-NEXT: ; implicit-def: $sgpr97
; SI-NEXT: ; implicit-def: $sgpr98
; SI-NEXT: ; implicit-def: $sgpr99
-; SI-NEXT: ; implicit-def: $sgpr56
; SI-NEXT: ; implicit-def: $sgpr57
; SI-NEXT: ; implicit-def: $sgpr58
; SI-NEXT: ; implicit-def: $sgpr59
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
-; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: ; kill: killed $sgpr46
-; SI-NEXT: s_branch .LBB77_2
+; SI-NEXT: ; implicit-def: $sgpr56
+; SI-NEXT: ; kill: killed $sgpr56
+; SI-NEXT: ; implicit-def: $sgpr56
+; SI-NEXT: ; kill: killed $sgpr56
+; SI-NEXT: ; implicit-def: $sgpr56
+; SI-NEXT: ; kill: killed $sgpr56
+; SI-NEXT: ; implicit-def: $sgpr56
+; SI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; SI-NEXT: s_cbranch_vccz .LBB77_2
; SI-NEXT: .LBB77_4:
; SI-NEXT: v_mov_b32_e32 v1, s71
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
@@ -130460,6 +130971,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -130490,13 +131002,16 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB77_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_3
-; VI-NEXT: .LBB77_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB77_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB77_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -130513,17 +131028,16 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[32:33], v[32:33], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; VI-NEXT: .LBB77_3: ; %end
+; VI-NEXT: .LBB77_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: v_mov_b32_e32 v19, v33
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB77_4:
-; VI-NEXT: s_branch .LBB77_2
;
; GFX9-LABEL: bitcast_v16f64_to_v64bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -130554,13 +131068,16 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_3
-; GFX9-NEXT: .LBB77_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB77_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -130577,45 +131094,43 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[32:33], v[32:33], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX9-NEXT: .LBB77_3: ; %end
+; GFX9-NEXT: .LBB77_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: v_mov_b32_e32 v19, v33
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB77_4:
-; GFX9-NEXT: s_branch .LBB77_2
;
; GFX11-LABEL: bitcast_v16f64_to_v64bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB77_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB77_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: .LBB77_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -130632,6 +131147,7 @@ define inreg <64 x bfloat> @bitcast_v16f64_to_v64bf16_scalar(<16 x double> inreg
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-NEXT: .LBB77_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -133621,23 +134137,28 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v3
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v2
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v5
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v4
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v7
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v6
-; SI-NEXT: v_mov_b32_e32 v39, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v8
; SI-NEXT: v_mov_b32_e32 v38, v12
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v0, 1.0, v39
+; SI-NEXT: v_mul_f32_e32 v0, 1.0, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v38
@@ -133651,14 +134172,11 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v30
; SI-NEXT: v_mov_b32_e32 v37, v14
-; SI-NEXT: v_mov_b32_e32 v14, v11
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v11, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v10, 1.0, v7
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v12, 1.0, v9
-; SI-NEXT: v_mul_f32_e32 v14, 1.0, v14
+; SI-NEXT: v_mul_f32_e32 v14, 1.0, v11
; SI-NEXT: v_mul_f32_e32 v13, 1.0, v13
; SI-NEXT: v_mul_f32_e32 v38, 1.0, v37
; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17
@@ -133677,7 +134195,9 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s19
+; SI-NEXT: v_mul_f32_e64 v10, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s23
+; SI-NEXT: v_mul_f32_e64 v11, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s25
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s24
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s27
@@ -133686,8 +134206,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mul_f32_e32 v22, 1.0, v42
; SI-NEXT: v_mul_f32_e32 v23, 1.0, v43
; SI-NEXT: v_mul_f32_e32 v52, 1.0, v44
; SI-NEXT: v_mul_f32_e32 v24, 1.0, v45
@@ -133703,77 +134223,76 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mul_f32_e32 v29, 1.0, v63
; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v0, 1.0, v35
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v34
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v0, 1.0, s17
-; SI-NEXT: v_mul_f32_e64 v35, 1.0, s18
-; SI-NEXT: v_mul_f32_e64 v36, 1.0, s21
-; SI-NEXT: v_mul_f32_e64 v42, 1.0, s20
-; SI-NEXT: v_mul_f32_e64 v33, 1.0, s22
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e64 v33, 1.0, s21
+; SI-NEXT: v_mul_f32_e64 v35, 1.0, s20
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB79_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v6
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
; SI-NEXT: v_alignbit_b32 v6, v6, v7, 16
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v5, v5, v8, 16
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_alignbit_b32 v1, v1, v35, 16
-; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; SI-NEXT: v_mov_b32_e32 v59, v2
; SI-NEXT: v_alignbit_b32 v0, v0, v2, 16
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v36
-; SI-NEXT: v_alignbit_b32 v2, v2, v42, 16
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v33
+; SI-NEXT: v_alignbit_b32 v2, v2, v35, 16
+; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_alignbit_b32 v4, v4, v9, 16
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_alignbit_b32 v1, v1, v10, 16
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_alignbit_b32 v3, v3, v33, 16
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_alignbit_b32 v3, v3, v11, 16
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v45, v12
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
@@ -133814,30 +134333,35 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_alignbit_b32 v28, v28, v37, 16
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
; SI-NEXT: v_mov_b32_e32 v35, v7
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_mov_b32_e32 v43, v8
; SI-NEXT: v_alignbit_b32 v7, v7, v8, 16
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v42, v9
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_mov_b32_e32 v60, v9
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_mov_b32_e32 v58, v10
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_mov_b32_e32 v56, v11
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v32
; SI-NEXT: v_alignbit_b32 v31, v31, v34, 16
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v60, v8
+; SI-NEXT: v_mov_b32_e32 v42, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
; SI-NEXT: v_alignbit_b32 v8, v8, v9, 16
-; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v58, v11
-; SI-NEXT: v_alignbit_b32 v9, v9, v11, 16
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v57, v9
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
+; SI-NEXT: v_alignbit_b32 v9, v9, v10, 16
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v56, v11
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
; SI-NEXT: v_alignbit_b32 v10, v10, v11, 16
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v12
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
@@ -133851,7 +134375,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_alignbit_b32 v12, v12, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v44, v14
+; SI-NEXT: v_mov_b32_e32 v33, v14
; SI-NEXT: v_alignbit_b32 v13, v13, v14, 16
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -133872,25 +134396,25 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_alignbit_b32 v22, v22, v54, 16
; SI-NEXT: s_cbranch_execnz .LBB79_3
; SI-NEXT: .LBB79_2: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xffff0000, v59
; SI-NEXT: v_add_f32_e32 v0, 0x40c00000, v0
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v35
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v8
-; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v60
+; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v42
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v57
@@ -133902,28 +134426,28 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v45
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v12
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v36
; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v32
; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v32
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
@@ -133934,8 +134458,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v0, v1, v0, 16
; SI-NEXT: v_alignbit_b32 v1, v3, v2, 16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
@@ -134014,22 +134538,22 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: v_alignbit_b32 v3, v4, v3, 16
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v4
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_alignbit_b32 v4, v5, v4, 16
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v5
; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
@@ -134037,7 +134561,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v43
; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
; SI-NEXT: v_alignbit_b32 v7, v8, v7, 16
-; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v42
+; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v60
; SI-NEXT: v_add_f32_e32 v8, 0x40c00000, v8
; SI-NEXT: v_alignbit_b32 v8, v9, v8, 16
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v58
@@ -134052,7 +134576,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v63
; SI-NEXT: v_add_f32_e32 v12, 0x40c00000, v12
; SI-NEXT: v_alignbit_b32 v12, v13, v12, 16
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v44
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v33
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v62
@@ -134084,7 +134608,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
; SI-NEXT: v_alignbit_b32 v22, v23, v22, 16
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v23
@@ -134104,12 +134628,12 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v48
; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
; SI-NEXT: v_alignbit_b32 v28, v29, v28, 16
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v29
; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v30
; SI-NEXT: v_add_f32_e32 v30, 0x40c00000, v30
@@ -134138,25 +134662,24 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_4:
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v61, v53
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v59, v2
-; SI-NEXT: v_mov_b32_e32 v57, v11
-; SI-NEXT: v_mov_b32_e32 v47, v10
; SI-NEXT: v_mov_b32_e32 v45, v12
-; SI-NEXT: v_mov_b32_e32 v33, v14
+; SI-NEXT: v_mov_b32_e32 v44, v14
; SI-NEXT: v_mov_b32_e32 v62, v38
; SI-NEXT: v_mov_b32_e32 v38, v39
; SI-NEXT: v_mov_b32_e32 v39, v41
@@ -134170,12 +134693,15 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; SI-NEXT: v_mov_b32_e32 v48, v37
; SI-NEXT: v_mov_b32_e32 v37, v34
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB79_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB79_2
+; SI-NEXT: s_branch .LBB79_3
;
; VI-LABEL: bitcast_v64bf16_to_v16f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -134196,7 +134722,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -134209,10 +134735,13 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB79_4
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_3
-; VI-NEXT: .LBB79_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB79_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB79_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v15
; VI-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; VI-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -134789,16 +135318,15 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; VI-NEXT: v_cndmask_b32_e32 v16, v33, v34, vcc
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; VI-NEXT: v_alignbit_b32 v16, v16, v18, 16
-; VI-NEXT: .LBB79_3: ; %end
+; VI-NEXT: .LBB79_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB79_4:
-; VI-NEXT: s_branch .LBB79_2
;
; GFX9-LABEL: bitcast_v64bf16_to_v16f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -134819,7 +135347,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -134832,10 +135360,13 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_3
-; GFX9-NEXT: .LBB79_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB79_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff0000, v15
; GFX9-NEXT: v_add_f32_e32 v18, 0x40c00000, v18
; GFX9-NEXT: v_bfe_u32 v33, v18, 16, 1
@@ -135445,11 +135976,9 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX9-NEXT: v_and_b32_sdwa v16, v18, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v33
; GFX9-NEXT: v_lshl_or_b32 v16, v18, 16, v16
-; GFX9-NEXT: .LBB79_3: ; %end
+; GFX9-NEXT: .LBB79_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB79_4:
-; GFX9-NEXT: s_branch .LBB79_2
;
; GFX11-LABEL: bitcast_v64bf16_to_v16f64_scalar:
; GFX11: ; %bb.0:
@@ -135539,8 +136068,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-NEXT: v_dual_mov_b32 v174, v5 :: v_dual_mov_b32 v173, v0
; GFX11-NEXT: v_dual_mov_b32 v184, v2 :: v_dual_mov_b32 v175, v1
; GFX11-NEXT: v_dual_mov_b32 v183, s28 :: v_dual_mov_b32 v172, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB79_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_dual_mov_b32 v32, s0 :: v_dual_mov_b32 v37, s2
@@ -135551,8 +136080,7 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-NEXT: v_dual_mov_b32 v86, s21 :: v_dual_mov_b32 v109, s23
; GFX11-NEXT: v_dual_mov_b32 v122, s24 :: v_dual_mov_b32 v151, s26
; GFX11-NEXT: v_dual_mov_b32 v136, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
+; GFX11-NEXT: s_cbranch_execnz .LBB79_3
; GFX11-NEXT: .LBB79_2: ; %cmp.true
; GFX11-NEXT: s_and_b32 s5, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s4, s27, 16
@@ -136299,8 +136827,8 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: ; implicit-def: $vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63
-; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX11-NEXT: ; implicit-def: $vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64
; GFX11-NEXT: ; implicit-def: $vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66
; GFX11-NEXT: ; implicit-def: $vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69
; GFX11-NEXT: ; implicit-def: $vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73
@@ -136314,7 +136842,9 @@ define inreg <16 x double> @bitcast_v64bf16_to_v16f64_scalar(<64 x bfloat> inreg
; GFX11-NEXT: ; implicit-def: $vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141
; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
; GFX11-NEXT: ; implicit-def: $vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168
-; GFX11-NEXT: s_branch .LBB79_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB79_2
+; GFX11-NEXT: s_branch .LBB79_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -137218,6 +137748,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[46:47], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: v_readfirstlane_b32 s45, v2
; SI-NEXT: v_readfirstlane_b32 s42, v3
@@ -137235,8 +137766,8 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v15
; SI-NEXT: v_readfirstlane_b32 s7, v16
; SI-NEXT: v_readfirstlane_b32 s4, v17
-; SI-NEXT: s_and_b64 s[46:47], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v18
+; SI-NEXT: s_mov_b64 s[46:47], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -137769,7 +138300,6 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: .LBB81_4:
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr2
@@ -137799,7 +138329,6 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr37
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr12
; SI-NEXT: ; implicit-def: $vgpr63
@@ -137834,12 +138363,17 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: s_branch .LBB81_2
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; SI-NEXT: s_cbranch_vccz .LBB81_2
+; SI-NEXT: s_branch .LBB81_3
;
; VI-LABEL: bitcast_v16f64_to_v64f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -137870,13 +138404,16 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB81_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_3
-; VI-NEXT: .LBB81_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB81_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB81_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -137893,17 +138430,16 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[32:33], v[32:33], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; VI-NEXT: .LBB81_3: ; %end
+; VI-NEXT: .LBB81_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: v_mov_b32_e32 v19, v33
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB81_4:
-; VI-NEXT: s_branch .LBB81_2
;
; GFX9-LABEL: bitcast_v16f64_to_v64f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -137934,13 +138470,16 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_3
-; GFX9-NEXT: .LBB81_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB81_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -137957,45 +138496,43 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[32:33], v[32:33], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX9-NEXT: .LBB81_3: ; %end
+; GFX9-NEXT: .LBB81_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: v_mov_b32_e32 v19, v33
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB81_4:
-; GFX9-NEXT: s_branch .LBB81_2
;
; GFX11-LABEL: bitcast_v16f64_to_v64f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB81_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB81_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: .LBB81_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -138012,6 +138549,7 @@ define inreg <64 x half> @bitcast_v16f64_to_v64f16_scalar(<16 x double> inreg %a
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-NEXT: .LBB81_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -139064,22 +139602,23 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v53, v26
-; SI-NEXT: v_mov_b32_e32 v45, v6
+; SI-NEXT: v_mov_b32_e32 v52, v30
+; SI-NEXT: v_mov_b32_e32 v54, v26
+; SI-NEXT: v_mov_b32_e32 v41, v6
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:76
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:16
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:28
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:48
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt expcnt(0)
@@ -139089,12 +139628,12 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:68
-; SI-NEXT: v_mov_b32_e32 v54, v14
+; SI-NEXT: v_mov_b32_e32 v53, v14
; SI-NEXT: v_mov_b32_e32 v55, v12
-; SI-NEXT: v_mov_b32_e32 v41, v11
+; SI-NEXT: v_mov_b32_e32 v43, v11
; SI-NEXT: v_mov_b32_e32 v40, v10
-; SI-NEXT: v_mov_b32_e32 v44, v9
-; SI-NEXT: v_mov_b32_e32 v43, v8
+; SI-NEXT: v_mov_b32_e32 v45, v9
+; SI-NEXT: v_mov_b32_e32 v44, v8
; SI-NEXT: v_cvt_f16_f32_e32 v9, v1
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cvt_f16_f32_e32 v11, v3
@@ -139102,27 +139641,27 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v12, v5
; SI-NEXT: v_cvt_f16_f32_e32 v14, v4
; SI-NEXT: v_cvt_f16_f32_e32 v58, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v41, v41
; SI-NEXT: v_cvt_f16_f32_e32 v56, v45
; SI-NEXT: v_cvt_f16_f32_e32 v46, v44
; SI-NEXT: v_cvt_f16_f32_e32 v44, v43
-; SI-NEXT: v_cvt_f16_f32_e32 v61, v41
-; SI-NEXT: v_cvt_f16_f32_e32 v59, v40
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v40
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v54
+; SI-NEXT: v_cvt_f16_f32_e32 v59, v55
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v53
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v41, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v43, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v40, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v40, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v54
; SI-NEXT: v_cvt_f16_f32_e32 v21, v29
; SI-NEXT: v_cvt_f16_f32_e32 v22, v28
; SI-NEXT: v_cvt_f16_f32_e32 v0, s17
@@ -139134,26 +139673,26 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v51
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v52
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v50
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v48
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v52
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v50
; SI-NEXT: v_cvt_f16_f32_e32 v24, v38
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v48
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f16_f32_e32 v25, v39
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v30
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f16_f32_e32 v26, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f16_f32_e32 v39, v6
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f16_f32_e32 v27, v42
+; SI-NEXT: v_cvt_f16_f32_e32 v27, v31
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f16_f32_e32 v38, v60
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v42
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_cvt_f16_f32_e32 v37, v62
; SI-NEXT: s_waitcnt vmcnt(5)
@@ -139163,70 +139702,74 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f16_f32_e32 v30, v33
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v34
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v35
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v35
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v36
-; SI-NEXT: v_cvt_f16_f32_e32 v63, s16
-; SI-NEXT: v_cvt_f16_f32_e32 v62, s18
-; SI-NEXT: v_cvt_f16_f32_e32 v60, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v42, s22
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
-; SI-NEXT: v_cvt_f16_f32_e32 v33, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v36
+; SI-NEXT: v_cvt_f16_f32_e32 v62, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v60, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v42, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v36, s22
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(6)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_or_b32_e32 v3, v36, v3
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_or_b32_e32 v20, v54, v20
+; SI-NEXT: v_mov_b32_e32 v54, v21
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v52
-; SI-NEXT: v_or_b32_e32 v5, v33, v5
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
; SI-NEXT: v_or_b32_e32 v22, v51, v22
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
@@ -139248,11 +139791,9 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v27, v38, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v7, v8, v7
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v28, v37, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
@@ -139260,70 +139801,68 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v4, v35, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: v_or_b32_e32 v9, v14, v9
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v58
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v61
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v44
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v47
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v57
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_or_b32_e32 v19, v54, v19
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_or_b32_e32 v29, v31, v29
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v34
-; SI-NEXT: v_or_b32_e32 v0, v63, v0
-; SI-NEXT: v_or_b32_e32 v1, v62, v1
-; SI-NEXT: v_or_b32_e32 v2, v60, v2
-; SI-NEXT: v_or_b32_e32 v3, v42, v3
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_mov_b32_e32 v63, v44
-; SI-NEXT: v_or_b32_e32 v11, v44, v11
+; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v63
+; SI-NEXT: v_or_b32_e32 v0, v62, v0
+; SI-NEXT: v_or_b32_e32 v1, v60, v1
+; SI-NEXT: v_or_b32_e32 v2, v42, v2
+; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v5, v32, v5
+; SI-NEXT: v_or_b32_e32 v10, v41, v10
+; SI-NEXT: v_or_b32_e32 v11, v46, v11
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
+; SI-NEXT: v_or_b32_e32 v12, v61, v12
; SI-NEXT: v_mov_b32_e32 v60, v59
-; SI-NEXT: v_or_b32_e32 v12, v59, v12
+; SI-NEXT: v_or_b32_e32 v13, v59, v13
; SI-NEXT: v_mov_b32_e32 v58, v57
-; SI-NEXT: v_or_b32_e32 v13, v57, v13
; SI-NEXT: v_mov_b32_e32 v56, v47
+; SI-NEXT: v_or_b32_e32 v14, v47, v14
; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_or_b32_e32 v14, v45, v14
+; SI-NEXT: v_or_b32_e32 v15, v45, v15
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v15, v43, v15
-; SI-NEXT: v_mov_b32_e32 v42, v41
-; SI-NEXT: v_or_b32_e32 v16, v41, v16
-; SI-NEXT: v_or_b32_e32 v17, v40, v17
+; SI-NEXT: v_or_b32_e32 v16, v43, v16
+; SI-NEXT: v_mov_b32_e32 v42, v53
+; SI-NEXT: v_or_b32_e32 v17, v53, v17
+; SI-NEXT: v_or_b32_e32 v18, v40, v18
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_or_b32_e32 v18, v55, v18
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v30, v32, v30
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_or_b32_e32 v31, v36, v31
+; SI-NEXT: v_or_b32_e32 v19, v55, v19
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_or_b32_e32 v30, v33, v30
+; SI-NEXT: v_or_b32_e32 v31, v35, v31
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB83_3
; SI-NEXT: .LBB83_2:
+; SI-NEXT: v_mov_b32_e32 v41, v44
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v63, v44
; SI-NEXT: v_mov_b32_e32 v62, v61
; SI-NEXT: v_mov_b32_e32 v60, v59
; SI-NEXT: v_mov_b32_e32 v58, v57
; SI-NEXT: v_mov_b32_e32 v56, v47
; SI-NEXT: v_mov_b32_e32 v46, v45
; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v42, v41
+; SI-NEXT: v_mov_b32_e32 v42, v53
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: v_mov_b32_e32 v35, v54
-; SI-NEXT: v_mov_b32_e32 v54, v20
-; SI-NEXT: v_mov_b32_e32 v33, v52
-; SI-NEXT: v_mov_b32_e32 v32, v34
-; SI-NEXT: v_mov_b32_e32 v52, v51
+; SI-NEXT: v_mov_b32_e32 v36, v54
+; SI-NEXT: v_mov_b32_e32 v54, v21
+; SI-NEXT: v_mov_b32_e32 v32, v52
+; SI-NEXT: v_mov_b32_e32 v34, v33
; SI-NEXT: v_mov_b32_e32 v51, v23
; SI-NEXT: v_mov_b32_e32 v50, v24
; SI-NEXT: v_mov_b32_e32 v49, v25
@@ -139331,25 +139870,29 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v39, v27
; SI-NEXT: v_mov_b32_e32 v38, v28
; SI-NEXT: v_mov_b32_e32 v37, v29
-; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: .LBB83_3: ; %Flow
-; SI-NEXT: v_mov_b32_e32 v34, v33
-; SI-NEXT: v_mov_b32_e32 v33, v35
-; SI-NEXT: v_mov_b32_e32 v35, v40
+; SI-NEXT: v_mov_b32_e32 v33, v63
+; SI-NEXT: v_mov_b32_e32 v52, v36
+; SI-NEXT: v_mov_b32_e32 v36, v40
; SI-NEXT: v_mov_b32_e32 v53, v42
+; SI-NEXT: v_mov_b32_e32 v55, v44
; SI-NEXT: v_mov_b32_e32 v40, v46
-; SI-NEXT: v_mov_b32_e32 v41, v56
+; SI-NEXT: v_mov_b32_e32 v57, v56
; SI-NEXT: v_mov_b32_e32 v42, v58
; SI-NEXT: v_mov_b32_e32 v43, v60
+; SI-NEXT: v_mov_b32_e32 v44, v62
+; SI-NEXT: v_mov_b32_e32 v45, v41
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_cbranch_vccnz .LBB83_5
; SI-NEXT: ; %bb.4: ; %cmp.true
@@ -139358,11 +139901,11 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v57
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -139371,10 +139914,10 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v43
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v44
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v57
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -139383,33 +139926,32 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_mov_b32_e32 v55, v44
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v52
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v34
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v52
+; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v32
; SI-NEXT: v_cvt_f32_f16_e32 v26, v49
; SI-NEXT: v_cvt_f32_f16_e32 v29, v38
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v32, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v33, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v32, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v33, v35
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32
@@ -139418,14 +139960,14 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v33, v33
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
@@ -139460,26 +140002,22 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
-; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
@@ -139524,72 +140062,70 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v63
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v61
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v59
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v41
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v42
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v17, v17
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_or_b32_e32 v17, v18, v17
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
@@ -139597,35 +140133,38 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v19, v18
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v54
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v54
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v21, v22, v21
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v23
; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
; SI-NEXT: v_or_b32_e32 v23, v25, v23
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v50
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
@@ -139638,7 +140177,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v28, v26
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
@@ -139653,9 +140192,9 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_or_b32_e32 v28, v30, v28
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v29, v37
; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
@@ -139663,16 +140202,14 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v31, v29
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v34
+; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: v_or_b32_e32 v30, v31, v30
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v32
; SI-NEXT: v_or_b32_e32 v31, v33, v31
@@ -139700,6 +140237,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -139720,7 +140258,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -139733,10 +140271,13 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB83_4
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_3
-; VI-NEXT: .LBB83_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB83_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB83_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v18, 0x200
; VI-NEXT: v_add_f16_sdwa v33, v15, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -139834,16 +140375,15 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; VI-NEXT: v_add_f16_e32 v16, 0x200, v16
; VI-NEXT: v_or_b32_e32 v17, v17, v33
; VI-NEXT: v_or_b32_e32 v16, v16, v18
-; VI-NEXT: .LBB83_3: ; %end
+; VI-NEXT: .LBB83_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB83_4:
-; VI-NEXT: s_branch .LBB83_2
;
; GFX9-LABEL: bitcast_v64f16_to_v16f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -139864,7 +140404,7 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -139877,10 +140417,13 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_3
-; GFX9-NEXT: .LBB83_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB83_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -139914,118 +140457,113 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX9-NEXT: v_pk_add_f16 v32, v32, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, v17, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, v16, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB83_3: ; %end
+; GFX9-NEXT: .LBB83_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB83_4:
-; GFX9-NEXT: s_branch .LBB83_2
;
; GFX11-LABEL: bitcast_v64f16_to_v16f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB83_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB83_3
; GFX11-NEXT: .LBB83_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_f16 v30, 0x200, s27 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v33, 0x200, s27 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v176, 0x200, v176 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v177, 0x200, v177 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v178, 0x200, v178 op_sel_hi:[0,1]
@@ -140034,119 +140572,117 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: v_pk_add_f16 v181, 0x200, v181 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v182, 0x200, v182 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v183, 0x200, v183 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v168, 0x200, v168 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v169, 0x200, v169 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v170, 0x200, v170 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v171, 0x200, v171 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v172, 0x200, v172 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v173, 0x200, v173 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v175, 0x200, v175 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v185, 0x200, v185 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v184, 0x200, v184 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v151, 0x200, s25 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v137, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v124, 0x200, s23 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v112, 0x200, s22 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v101, 0x200, s21 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v91, 0x200, s20 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v82, 0x200, s19 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v74, 0x200, s18 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v67, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v61, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v56, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v52, 0x200, s2 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v49, 0x200, s1 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v47, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v174, 0x200, v174 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v149, 0x200, s25 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v135, 0x200, s24 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v122, 0x200, s23 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v110, 0x200, s22 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v99, 0x200, s21 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v89, 0x200, s20 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v80, 0x200, s19 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v72, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v65, 0x200, s17 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v59, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v54, 0x200, s3 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v50, 0x200, s2 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: .LBB83_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -140154,23 +140690,25 @@ define inreg <16 x double> @bitcast_v64f16_to_v16f64_scalar(<64 x half> inreg %a
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB83_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB83_2
+; GFX11-NEXT: s_branch .LBB83_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -140689,6 +141227,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, s16
; SI-NEXT: v_mov_b32_e32 v32, s17
; SI-NEXT: v_mov_b32_e32 v29, s18
@@ -140701,9 +141240,9 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v24, s25
; SI-NEXT: v_mov_b32_e32 v21, s26
; SI-NEXT: v_mov_b32_e32 v22, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, s28
; SI-NEXT: v_mov_b32_e32 v20, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -141064,12 +141603,15 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr50
; SI-NEXT: ; kill: killed $vgpr48
; SI-NEXT: ; implicit-def: $vgpr48
-; SI-NEXT: s_branch .LBB85_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB85_2
+; SI-NEXT: s_branch .LBB85_3
;
; VI-LABEL: bitcast_v16f64_to_v64i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -141100,13 +141642,16 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB85_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB85_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB85_3
-; VI-NEXT: .LBB85_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB85_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB85_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -141123,17 +141668,16 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[32:33], v[32:33], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; VI-NEXT: .LBB85_3: ; %end
+; VI-NEXT: .LBB85_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: v_mov_b32_e32 v19, v33
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB85_4:
-; VI-NEXT: s_branch .LBB85_2
;
; GFX9-LABEL: bitcast_v16f64_to_v64i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -141164,13 +141708,16 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB85_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB85_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB85_3
-; GFX9-NEXT: .LBB85_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB85_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -141187,45 +141734,43 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[32:33], v[32:33], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX9-NEXT: .LBB85_3: ; %end
+; GFX9-NEXT: .LBB85_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: v_mov_b32_e32 v19, v33
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB85_4:
-; GFX9-NEXT: s_branch .LBB85_2
;
; GFX11-LABEL: bitcast_v16f64_to_v64i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v14 :: v_dual_mov_b32 v31, v13
-; GFX11-NEXT: v_dual_mov_b32 v30, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
+; GFX11-NEXT: v_dual_mov_b32 v15, v14 :: v_dual_mov_b32 v30, v12
+; GFX11-NEXT: v_dual_mov_b32 v31, v13 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB85_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB85_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB85_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB85_3:
-; GFX11-NEXT: .LBB85_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -141242,6 +141787,7 @@ define inreg <64 x i16> @bitcast_v16f64_to_v64i16_scalar(<16 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
+; GFX11-NEXT: .LBB85_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -142088,43 +142634,43 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v49, v12
-; SI-NEXT: v_mov_b32_e32 v56, v10
-; SI-NEXT: s_waitcnt expcnt(6)
-; SI-NEXT: v_mov_b32_e32 v57, v8
+; SI-NEXT: v_mov_b32_e32 v47, v10
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v60, v8
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:36
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:48
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:52
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:64
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:60
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:68
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v55, 16, v3
-; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v7
-; SI-NEXT: v_lshlrev_b32_e32 v41, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v9
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v17
-; SI-NEXT: v_lshlrev_b32_e32 v37, 16, v19
+; SI-NEXT: v_lshlrev_b32_e32 v38, 16, v19
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v21
-; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v23
+; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v23
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v25
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v27
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
@@ -142132,7 +142678,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v31
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v52
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v51
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v8
@@ -142141,102 +142687,103 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v54, 16, v12
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v38
-; SI-NEXT: s_waitcnt vmcnt(5) expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37
+; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v35
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v34
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v32
-; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v32
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: s_cbranch_scc0 .LBB87_4
+; SI-NEXT: s_cbranch_scc0 .LBB87_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v7, v0, v61
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v4
-; SI-NEXT: v_or_b32_e32 v9, v0, v50
+; SI-NEXT: v_or_b32_e32 v9, v0, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; SI-NEXT: v_or_b32_e32 v10, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v57
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_or_b32_e32 v10, v0, v50
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_or_b32_e32 v11, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
; SI-NEXT: v_or_b32_e32 v12, v0, v40
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
; SI-NEXT: v_or_b32_e32 v13, v0, v13
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v14
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v41, v14
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v60, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v16
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v43, v48
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
; SI-NEXT: v_mov_b32_e32 v48, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v18
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_or_b32_e32 v16, v0, v37
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_or_b32_e32 v16, v0, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v20
; SI-NEXT: v_or_b32_e32 v17, v0, v17
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v22
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_or_b32_e32 v18, v0, v35
+; SI-NEXT: v_or_b32_e32 v18, v0, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v24
; SI-NEXT: v_or_b32_e32 v19, v0, v19
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v26
-; SI-NEXT: v_mov_b32_e32 v37, v20
+; SI-NEXT: v_mov_b32_e32 v38, v20
; SI-NEXT: v_or_b32_e32 v20, v0, v33
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v28
; SI-NEXT: v_or_b32_e32 v21, v0, v21
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v30
; SI-NEXT: v_or_b32_e32 v22, v0, v31
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v39, v23
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v56
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v56, v23
; SI-NEXT: v_or_b32_e32 v23, v0, v23
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v60
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
; SI-NEXT: v_mov_b32_e32 v24, v29
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v24, v0, v24
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v47
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_or_b32_e32 v25, v0, v25
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v46
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
; SI-NEXT: v_mov_b32_e32 v26, v27
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
@@ -142253,29 +142800,28 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_lshl_b32 s9, s25, 16
; SI-NEXT: v_mov_b32_e32 v33, v28
; SI-NEXT: v_or_b32_e32 v28, v0, v5
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v59
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: v_or_b32_e32 v29, v0, v62
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v42
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_or_b32_e32 v29, v0, v63
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v30, v0, v3
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v58
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v41
; SI-NEXT: s_or_b32 s10, s10, s11
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v32, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v34, v55
; SI-NEXT: v_or_b32_e32 v8, v1, v55
; SI-NEXT: v_mov_b32_e32 v55, v4
; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v47, v46
; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: v_or_b32_e32 v31, v0, v34
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_or_b32_e32 v31, v0, v62
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: v_mov_b32_e32 v2, s6
@@ -142283,12 +142829,45 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v4, s8
; SI-NEXT: v_mov_b32_e32 v5, s9
; SI-NEXT: v_mov_b32_e32 v6, s10
-; SI-NEXT: s_cbranch_execnz .LBB87_3
-; SI-NEXT: .LBB87_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_branch .LBB87_3
+; SI-NEXT: .LBB87_2:
+; SI-NEXT: v_mov_b32_e32 v35, v61
+; SI-NEXT: v_mov_b32_e32 v34, v55
+; SI-NEXT: v_mov_b32_e32 v37, v2
+; SI-NEXT: v_mov_b32_e32 v55, v4
+; SI-NEXT: v_mov_b32_e32 v53, v6
+; SI-NEXT: v_mov_b32_e32 v52, v60
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_mov_b32_e32 v61, v50
+; SI-NEXT: v_mov_b32_e32 v32, v47
+; SI-NEXT: v_mov_b32_e32 v50, v43
+; SI-NEXT: v_mov_b32_e32 v43, v40
+; SI-NEXT: v_mov_b32_e32 v40, v13
+; SI-NEXT: v_mov_b32_e32 v57, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v15
+; SI-NEXT: v_mov_b32_e32 v60, v14
+; SI-NEXT: v_mov_b32_e32 v47, v16
+; SI-NEXT: v_mov_b32_e32 v45, v44
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v38, v20
+; SI-NEXT: v_mov_b32_e32 v56, v23
+; SI-NEXT: v_mov_b32_e32 v36, v24
+; SI-NEXT: v_mov_b32_e32 v33, v28
+; SI-NEXT: v_mov_b32_e32 v39, v29
+; SI-NEXT: v_mov_b32_e32 v46, v25
+; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; SI-NEXT: .LBB87_3: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v41, v42
+; SI-NEXT: s_cbranch_vccnz .LBB87_5
+; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v63
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; SI-NEXT: v_or_b32_e32 v1, v32, v1
+; SI-NEXT: v_or_b32_e32 v1, v34, v1
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_add_i32 s16, s16, 3
@@ -142334,7 +142913,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v38, v0
+; SI-NEXT: v_or_b32_e32 v0, v35, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -142342,25 +142921,25 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
+; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v36, v0
+; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v61
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v57, v0
+; SI-NEXT: v_or_b32_e32 v0, v43, v0
; SI-NEXT: v_add_i32_e32 v12, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v40
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v57
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v49, v0
+; SI-NEXT: v_or_b32_e32 v0, v40, v0
; SI-NEXT: v_add_i32_e32 v13, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v43, v0
+; SI-NEXT: v_or_b32_e32 v0, v49, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v56
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v48, v0
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x30000, v0
@@ -142371,7 +142950,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v38
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -142386,7 +142965,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v35
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v36
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -142414,31 +142993,31 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v22, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v39, v0
+; SI-NEXT: v_or_b32_e32 v0, v56, v0
; SI-NEXT: v_add_i32_e32 v23, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v60, v0
+; SI-NEXT: v_or_b32_e32 v0, v39, v0
; SI-NEXT: v_add_i32_e32 v24, vcc, 0x30000, v0
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v0, v46, v0
; SI-NEXT: v_add_i32_e32 v25, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v47
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v26, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -142452,7 +143031,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v28, vcc, 0x30000, v0
-; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
@@ -142461,7 +143040,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v59
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v41
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_or_b32_e32 v0, v1, v0
@@ -142476,7 +143055,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: v_add_i32_e32 v31, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: .LBB87_3: ; %end
+; SI-NEXT: .LBB87_5: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
@@ -142495,40 +143074,12 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB87_4:
-; SI-NEXT: v_mov_b32_e32 v38, v61
-; SI-NEXT: v_mov_b32_e32 v32, v55
-; SI-NEXT: v_mov_b32_e32 v63, v2
-; SI-NEXT: v_mov_b32_e32 v55, v4
-; SI-NEXT: v_mov_b32_e32 v53, v6
-; SI-NEXT: v_mov_b32_e32 v52, v57
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v61, v56
-; SI-NEXT: v_mov_b32_e32 v50, v43
-; SI-NEXT: v_mov_b32_e32 v36, v41
-; SI-NEXT: v_mov_b32_e32 v57, v40
-; SI-NEXT: v_mov_b32_e32 v40, v49
-; SI-NEXT: v_mov_b32_e32 v49, v13
-; SI-NEXT: v_mov_b32_e32 v43, v48
-; SI-NEXT: v_mov_b32_e32 v48, v15
-; SI-NEXT: v_mov_b32_e32 v41, v14
-; SI-NEXT: v_mov_b32_e32 v56, v16
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v45, v44
-; SI-NEXT: v_mov_b32_e32 v59, v42
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v37, v20
-; SI-NEXT: v_mov_b32_e32 v39, v23
-; SI-NEXT: v_mov_b32_e32 v35, v24
-; SI-NEXT: v_mov_b32_e32 v33, v28
-; SI-NEXT: v_mov_b32_e32 v60, v29
-; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB87_2
;
; VI-LABEL: bitcast_v64i16_to_v16f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s6, v2
; VI-NEXT: v_readfirstlane_b32 s7, v3
; VI-NEXT: v_readfirstlane_b32 s8, v4
@@ -142546,12 +143097,15 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_readfirstlane_b32 s44, v16
; VI-NEXT: v_readfirstlane_b32 s45, v17
; VI-NEXT: v_readfirstlane_b32 s46, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s47, v1
-; VI-NEXT: s_cbranch_scc0 .LBB87_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB87_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB87_3
-; VI-NEXT: .LBB87_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB87_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB87_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s47, 3
; VI-NEXT: s_and_b32 s4, s47, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -142712,7 +143266,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s6, s4, 0x30000
-; VI-NEXT: .LBB87_3: ; %end
+; VI-NEXT: .LBB87_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -142746,13 +143300,12 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v30, s44
; VI-NEXT: v_mov_b32_e32 v31, s45
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB87_4:
-; VI-NEXT: s_branch .LBB87_2
;
; GFX9-LABEL: bitcast_v64i16_to_v16f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -142773,7 +143326,7 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -142786,10 +143339,13 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB87_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB87_3
-; GFX9-NEXT: .LBB87_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB87_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB87_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -142822,118 +143378,113 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB87_3: ; %end
+; GFX9-NEXT: .LBB87_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB87_4:
-; GFX9-NEXT: s_branch .LBB87_2
;
; GFX11-LABEL: bitcast_v64i16_to_v16f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v14
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:292
-; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:288
-; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:284
-; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:280
-; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:276
-; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:272
-; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:268
-; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:264
-; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:260
-; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:256
-; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:252
-; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:248
-; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:244
-; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:240
-; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:236
-; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:232
-; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:228
-; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:224
-; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:220
-; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:216
-; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:212
-; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:208
-; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:204
-; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:200
-; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:196
-; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:192
-; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:188
-; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:184
-; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:180
-; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:176
-; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:172
-; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:284
+; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:280
+; GFX11-NEXT: scratch_store_b32 off, v42, s32 offset:276
+; GFX11-NEXT: scratch_store_b32 off, v43, s32 offset:272
+; GFX11-NEXT: scratch_store_b32 off, v44, s32 offset:268
+; GFX11-NEXT: scratch_store_b32 off, v45, s32 offset:264
+; GFX11-NEXT: scratch_store_b32 off, v46, s32 offset:260
+; GFX11-NEXT: scratch_store_b32 off, v47, s32 offset:256
+; GFX11-NEXT: scratch_store_b32 off, v56, s32 offset:252
+; GFX11-NEXT: scratch_store_b32 off, v57, s32 offset:248
+; GFX11-NEXT: scratch_store_b32 off, v58, s32 offset:244
+; GFX11-NEXT: scratch_store_b32 off, v59, s32 offset:240
+; GFX11-NEXT: scratch_store_b32 off, v60, s32 offset:236
+; GFX11-NEXT: scratch_store_b32 off, v61, s32 offset:232
+; GFX11-NEXT: scratch_store_b32 off, v62, s32 offset:228
+; GFX11-NEXT: scratch_store_b32 off, v63, s32 offset:224
+; GFX11-NEXT: scratch_store_b32 off, v72, s32 offset:220
+; GFX11-NEXT: scratch_store_b32 off, v73, s32 offset:216
+; GFX11-NEXT: scratch_store_b32 off, v74, s32 offset:212
+; GFX11-NEXT: scratch_store_b32 off, v75, s32 offset:208
+; GFX11-NEXT: scratch_store_b32 off, v76, s32 offset:204
+; GFX11-NEXT: scratch_store_b32 off, v77, s32 offset:200
+; GFX11-NEXT: scratch_store_b32 off, v78, s32 offset:196
+; GFX11-NEXT: scratch_store_b32 off, v79, s32 offset:192
+; GFX11-NEXT: scratch_store_b32 off, v88, s32 offset:188
+; GFX11-NEXT: scratch_store_b32 off, v89, s32 offset:184
+; GFX11-NEXT: scratch_store_b32 off, v90, s32 offset:180
+; GFX11-NEXT: scratch_store_b32 off, v91, s32 offset:176
+; GFX11-NEXT: scratch_store_b32 off, v92, s32 offset:172
+; GFX11-NEXT: scratch_store_b32 off, v93, s32 offset:168
+; GFX11-NEXT: scratch_store_b32 off, v94, s32 offset:164
+; GFX11-NEXT: scratch_store_b32 off, v95, s32 offset:160
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:164
-; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:160
-; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:156
-; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:152
-; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:148
-; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:144
-; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:140
-; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:136
-; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:132
-; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:128
-; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:124
-; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:120
-; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:116
-; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:112
-; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:108
-; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:104
-; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:100
-; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:96
-; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:92
-; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:88
-; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:84
-; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:80
-; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:76
-; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:72
-; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:68
-; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:64
-; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:60
-; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:56
-; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:52
-; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:48
-; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:44
-; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:40
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:36
-; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:32
-; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:28
-; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:24
-; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:20
-; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:16
-; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:12
-; GFX11-NEXT: scratch_store_b32 off, v175, s32 offset:8
-; GFX11-NEXT: scratch_store_b32 off, v184, s32 offset:4
-; GFX11-NEXT: scratch_store_b32 off, v185, s32
+; GFX11-NEXT: scratch_store_b32 off, v104, s32 offset:156
+; GFX11-NEXT: scratch_store_b32 off, v105, s32 offset:152
+; GFX11-NEXT: scratch_store_b32 off, v106, s32 offset:148
+; GFX11-NEXT: scratch_store_b32 off, v107, s32 offset:144
+; GFX11-NEXT: scratch_store_b32 off, v108, s32 offset:140
+; GFX11-NEXT: scratch_store_b32 off, v109, s32 offset:136
+; GFX11-NEXT: scratch_store_b32 off, v110, s32 offset:132
+; GFX11-NEXT: scratch_store_b32 off, v111, s32 offset:128
+; GFX11-NEXT: scratch_store_b32 off, v120, s32 offset:124
+; GFX11-NEXT: scratch_store_b32 off, v121, s32 offset:120
+; GFX11-NEXT: scratch_store_b32 off, v122, s32 offset:116
+; GFX11-NEXT: scratch_store_b32 off, v123, s32 offset:112
+; GFX11-NEXT: scratch_store_b32 off, v124, s32 offset:108
+; GFX11-NEXT: scratch_store_b32 off, v125, s32 offset:104
+; GFX11-NEXT: scratch_store_b32 off, v126, s32 offset:100
+; GFX11-NEXT: scratch_store_b32 off, v127, s32 offset:96
+; GFX11-NEXT: scratch_store_b32 off, v136, s32 offset:92
+; GFX11-NEXT: scratch_store_b32 off, v137, s32 offset:88
+; GFX11-NEXT: scratch_store_b32 off, v138, s32 offset:84
+; GFX11-NEXT: scratch_store_b32 off, v139, s32 offset:80
+; GFX11-NEXT: scratch_store_b32 off, v140, s32 offset:76
+; GFX11-NEXT: scratch_store_b32 off, v141, s32 offset:72
+; GFX11-NEXT: scratch_store_b32 off, v142, s32 offset:68
+; GFX11-NEXT: scratch_store_b32 off, v143, s32 offset:64
+; GFX11-NEXT: scratch_store_b32 off, v152, s32 offset:60
+; GFX11-NEXT: scratch_store_b32 off, v153, s32 offset:56
+; GFX11-NEXT: scratch_store_b32 off, v154, s32 offset:52
+; GFX11-NEXT: scratch_store_b32 off, v155, s32 offset:48
+; GFX11-NEXT: scratch_store_b32 off, v156, s32 offset:44
+; GFX11-NEXT: scratch_store_b32 off, v157, s32 offset:40
+; GFX11-NEXT: scratch_store_b32 off, v158, s32 offset:36
+; GFX11-NEXT: scratch_store_b32 off, v159, s32 offset:32
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_store_b32 off, v168, s32 offset:28
+; GFX11-NEXT: scratch_store_b32 off, v169, s32 offset:24
+; GFX11-NEXT: scratch_store_b32 off, v170, s32 offset:20
+; GFX11-NEXT: scratch_store_b32 off, v171, s32 offset:16
+; GFX11-NEXT: scratch_store_b32 off, v172, s32 offset:12
+; GFX11-NEXT: scratch_store_b32 off, v173, s32 offset:8
+; GFX11-NEXT: scratch_store_b32 off, v174, s32 offset:4
+; GFX11-NEXT: scratch_store_b32 off, v175, s32
; GFX11-NEXT: v_dual_mov_b32 v176, v13 :: v_dual_mov_b32 v177, v12
; GFX11-NEXT: v_dual_mov_b32 v178, v11 :: v_dual_mov_b32 v179, v10
; GFX11-NEXT: v_dual_mov_b32 v180, v9 :: v_dual_mov_b32 v181, v8
; GFX11-NEXT: v_dual_mov_b32 v182, v7 :: v_dual_mov_b32 v183, v6
-; GFX11-NEXT: v_dual_mov_b32 v170, v5 :: v_dual_mov_b32 v171, v4
-; GFX11-NEXT: v_dual_mov_b32 v172, v3 :: v_dual_mov_b32 v173, v2
-; GFX11-NEXT: v_dual_mov_b32 v174, v1 :: v_dual_mov_b32 v175, v0
-; GFX11-NEXT: v_dual_mov_b32 v184, s28 :: v_dual_mov_b32 v185, s29
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v168, v5 :: v_dual_mov_b32 v169, v4
+; GFX11-NEXT: v_dual_mov_b32 v170, v3 :: v_dual_mov_b32 v171, v2
+; GFX11-NEXT: v_dual_mov_b32 v172, v1 :: v_dual_mov_b32 v173, v0
+; GFX11-NEXT: v_dual_mov_b32 v174, s28 :: v_dual_mov_b32 v175, s29
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB87_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: v_dual_mov_b32 v47, s0 :: v_dual_mov_b32 v52, s2
-; GFX11-NEXT: v_dual_mov_b32 v49, s1 :: v_dual_mov_b32 v56, s3
-; GFX11-NEXT: v_dual_mov_b32 v61, s16 :: v_dual_mov_b32 v74, s18
-; GFX11-NEXT: v_dual_mov_b32 v67, s17 :: v_dual_mov_b32 v82, s19
-; GFX11-NEXT: v_dual_mov_b32 v91, s20 :: v_dual_mov_b32 v112, s22
-; GFX11-NEXT: v_dual_mov_b32 v101, s21 :: v_dual_mov_b32 v124, s23
-; GFX11-NEXT: v_dual_mov_b32 v137, s24 :: v_dual_mov_b32 v14, s26
-; GFX11-NEXT: v_dual_mov_b32 v151, s25 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB87_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v59, s16
+; GFX11-NEXT: v_dual_mov_b32 v2, s1 :: v_dual_mov_b32 v65, s17
+; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v89, s20
+; GFX11-NEXT: v_dual_mov_b32 v54, s3 :: v_dual_mov_b32 v99, s21
+; GFX11-NEXT: v_dual_mov_b32 v72, s18 :: v_dual_mov_b32 v135, s24
+; GFX11-NEXT: v_dual_mov_b32 v80, s19 :: v_dual_mov_b32 v149, s25
+; GFX11-NEXT: v_dual_mov_b32 v110, s22 :: v_dual_mov_b32 v17, s26
+; GFX11-NEXT: v_dual_mov_b32 v122, s23 :: v_dual_mov_b32 v33, s27
+; GFX11-NEXT: s_cbranch_execnz .LBB87_3
; GFX11-NEXT: .LBB87_2: ; %cmp.true
-; GFX11-NEXT: v_pk_add_u16 v30, s27, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v33, s27, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v17, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v176, v176, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v177, v177, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v178, v178, 3 op_sel_hi:[1,0]
@@ -142942,119 +143493,117 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v181, v181, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v182, v182, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v183, v183, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v168, v168, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v169, v169, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v170, v170, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v171, v171, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v172, v172, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v173, v173, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v175, v175, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v185, v185, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v184, v184, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v151, s25, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v137, s24, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v124, s23, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v112, s22, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v101, s21, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v91, s20, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v82, s19, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v74, s18, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v67, s17, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v61, s16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v56, s3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v52, s2, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v49, s1, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v47, s0, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v174, v174, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v149, s25, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v135, s24, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v122, s23, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v110, s22, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v99, s21, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v89, s20, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v80, s19, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v72, s18, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v65, s17, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v59, s16, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v54, s3, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v50, s2, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v2, s1, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: .LBB87_3: ; %end
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
-; GFX11-NEXT: v_dual_mov_b32 v0, v47 :: v_dual_mov_b32 v1, v49
-; GFX11-NEXT: v_dual_mov_b32 v3, v56 :: v_dual_mov_b32 v4, v61
-; GFX11-NEXT: v_dual_mov_b32 v6, v74 :: v_dual_mov_b32 v9, v101
-; GFX11-NEXT: v_dual_mov_b32 v7, v82 :: v_dual_mov_b32 v8, v91
-; GFX11-NEXT: v_dual_mov_b32 v11, v124 :: v_dual_mov_b32 v12, v137
-; GFX11-NEXT: v_dual_mov_b32 v15, v30 :: v_dual_mov_b32 v16, v184
-; GFX11-NEXT: v_dual_mov_b32 v17, v185 :: v_dual_mov_b32 v18, v175
-; GFX11-NEXT: v_dual_mov_b32 v19, v174 :: v_dual_mov_b32 v20, v173
-; GFX11-NEXT: v_dual_mov_b32 v21, v172 :: v_dual_mov_b32 v22, v171
-; GFX11-NEXT: v_dual_mov_b32 v23, v170 :: v_dual_mov_b32 v24, v183
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v1, v2 :: v_dual_mov_b32 v4, v59
+; GFX11-NEXT: v_dual_mov_b32 v3, v54 :: v_dual_mov_b32 v6, v72
+; GFX11-NEXT: v_dual_mov_b32 v7, v80 :: v_dual_mov_b32 v8, v89
+; GFX11-NEXT: v_dual_mov_b32 v9, v99 :: v_dual_mov_b32 v10, v110
+; GFX11-NEXT: v_dual_mov_b32 v11, v122 :: v_dual_mov_b32 v12, v135
+; GFX11-NEXT: v_dual_mov_b32 v13, v149 :: v_dual_mov_b32 v16, v174
+; GFX11-NEXT: v_dual_mov_b32 v14, v17 :: v_dual_mov_b32 v17, v175
+; GFX11-NEXT: v_dual_mov_b32 v15, v33 :: v_dual_mov_b32 v20, v171
+; GFX11-NEXT: v_dual_mov_b32 v18, v173 :: v_dual_mov_b32 v19, v172
+; GFX11-NEXT: v_dual_mov_b32 v21, v170 :: v_dual_mov_b32 v22, v169
+; GFX11-NEXT: v_dual_mov_b32 v23, v168 :: v_dual_mov_b32 v24, v183
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v185, off, s32
-; GFX11-NEXT: scratch_load_b32 v184, off, s32 offset:4
-; GFX11-NEXT: scratch_load_b32 v175, off, s32 offset:8
-; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:12
-; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:16
-; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:20
-; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:24
-; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:28
-; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:32
-; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:36
-; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:40
-; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:44
-; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:48
-; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:52
-; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:56
-; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:60
-; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:64
-; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:68
-; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:72
-; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:76
-; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:80
-; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:84
-; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:88
-; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:92
-; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:96
-; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:100
-; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:104
-; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:108
-; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:112
-; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:116
-; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:120
-; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:124
+; GFX11-NEXT: scratch_load_b32 v175, off, s32
+; GFX11-NEXT: scratch_load_b32 v174, off, s32 offset:4
+; GFX11-NEXT: scratch_load_b32 v173, off, s32 offset:8
+; GFX11-NEXT: scratch_load_b32 v172, off, s32 offset:12
+; GFX11-NEXT: scratch_load_b32 v171, off, s32 offset:16
+; GFX11-NEXT: scratch_load_b32 v170, off, s32 offset:20
+; GFX11-NEXT: scratch_load_b32 v169, off, s32 offset:24
+; GFX11-NEXT: scratch_load_b32 v168, off, s32 offset:28
+; GFX11-NEXT: scratch_load_b32 v159, off, s32 offset:32
+; GFX11-NEXT: scratch_load_b32 v158, off, s32 offset:36
+; GFX11-NEXT: scratch_load_b32 v157, off, s32 offset:40
+; GFX11-NEXT: scratch_load_b32 v156, off, s32 offset:44
+; GFX11-NEXT: scratch_load_b32 v155, off, s32 offset:48
+; GFX11-NEXT: scratch_load_b32 v154, off, s32 offset:52
+; GFX11-NEXT: scratch_load_b32 v153, off, s32 offset:56
+; GFX11-NEXT: scratch_load_b32 v152, off, s32 offset:60
+; GFX11-NEXT: scratch_load_b32 v143, off, s32 offset:64
+; GFX11-NEXT: scratch_load_b32 v142, off, s32 offset:68
+; GFX11-NEXT: scratch_load_b32 v141, off, s32 offset:72
+; GFX11-NEXT: scratch_load_b32 v140, off, s32 offset:76
+; GFX11-NEXT: scratch_load_b32 v139, off, s32 offset:80
+; GFX11-NEXT: scratch_load_b32 v138, off, s32 offset:84
+; GFX11-NEXT: scratch_load_b32 v137, off, s32 offset:88
+; GFX11-NEXT: scratch_load_b32 v136, off, s32 offset:92
+; GFX11-NEXT: scratch_load_b32 v127, off, s32 offset:96
+; GFX11-NEXT: scratch_load_b32 v126, off, s32 offset:100
+; GFX11-NEXT: scratch_load_b32 v125, off, s32 offset:104
+; GFX11-NEXT: scratch_load_b32 v124, off, s32 offset:108
+; GFX11-NEXT: scratch_load_b32 v123, off, s32 offset:112
+; GFX11-NEXT: scratch_load_b32 v122, off, s32 offset:116
+; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:120
+; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:124
; GFX11-NEXT: s_clause 0x1f
-; GFX11-NEXT: scratch_load_b32 v121, off, s32 offset:128
-; GFX11-NEXT: scratch_load_b32 v120, off, s32 offset:132
-; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:136
-; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:140
-; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:144
-; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:148
-; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:152
-; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:156
-; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:160
-; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:164
-; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:168
-; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:172
-; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:176
-; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:180
-; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:184
-; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:188
-; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:192
-; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:196
-; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:200
-; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:204
-; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:208
-; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:212
-; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:216
-; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:220
-; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:224
-; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:228
-; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:232
-; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:236
-; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:240
-; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:244
-; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:248
-; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:252
-; GFX11-NEXT: s_clause 0x9
-; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:256
-; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:260
-; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:264
-; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:268
-; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:272
-; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:276
-; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:280
-; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:284
-; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:288
-; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:292
-; GFX11-NEXT: v_dual_mov_b32 v2, v52 :: v_dual_mov_b32 v5, v67
-; GFX11-NEXT: v_dual_mov_b32 v10, v112 :: v_dual_mov_b32 v13, v151
+; GFX11-NEXT: scratch_load_b32 v111, off, s32 offset:128
+; GFX11-NEXT: scratch_load_b32 v110, off, s32 offset:132
+; GFX11-NEXT: scratch_load_b32 v109, off, s32 offset:136
+; GFX11-NEXT: scratch_load_b32 v108, off, s32 offset:140
+; GFX11-NEXT: scratch_load_b32 v107, off, s32 offset:144
+; GFX11-NEXT: scratch_load_b32 v106, off, s32 offset:148
+; GFX11-NEXT: scratch_load_b32 v105, off, s32 offset:152
+; GFX11-NEXT: scratch_load_b32 v104, off, s32 offset:156
+; GFX11-NEXT: scratch_load_b32 v95, off, s32 offset:160
+; GFX11-NEXT: scratch_load_b32 v94, off, s32 offset:164
+; GFX11-NEXT: scratch_load_b32 v93, off, s32 offset:168
+; GFX11-NEXT: scratch_load_b32 v92, off, s32 offset:172
+; GFX11-NEXT: scratch_load_b32 v91, off, s32 offset:176
+; GFX11-NEXT: scratch_load_b32 v90, off, s32 offset:180
+; GFX11-NEXT: scratch_load_b32 v89, off, s32 offset:184
+; GFX11-NEXT: scratch_load_b32 v88, off, s32 offset:188
+; GFX11-NEXT: scratch_load_b32 v79, off, s32 offset:192
+; GFX11-NEXT: scratch_load_b32 v78, off, s32 offset:196
+; GFX11-NEXT: scratch_load_b32 v77, off, s32 offset:200
+; GFX11-NEXT: scratch_load_b32 v76, off, s32 offset:204
+; GFX11-NEXT: scratch_load_b32 v75, off, s32 offset:208
+; GFX11-NEXT: scratch_load_b32 v74, off, s32 offset:212
+; GFX11-NEXT: scratch_load_b32 v73, off, s32 offset:216
+; GFX11-NEXT: scratch_load_b32 v72, off, s32 offset:220
+; GFX11-NEXT: scratch_load_b32 v63, off, s32 offset:224
+; GFX11-NEXT: scratch_load_b32 v62, off, s32 offset:228
+; GFX11-NEXT: scratch_load_b32 v61, off, s32 offset:232
+; GFX11-NEXT: scratch_load_b32 v60, off, s32 offset:236
+; GFX11-NEXT: scratch_load_b32 v59, off, s32 offset:240
+; GFX11-NEXT: scratch_load_b32 v58, off, s32 offset:244
+; GFX11-NEXT: scratch_load_b32 v57, off, s32 offset:248
+; GFX11-NEXT: scratch_load_b32 v56, off, s32 offset:252
+; GFX11-NEXT: s_clause 0x7
+; GFX11-NEXT: scratch_load_b32 v47, off, s32 offset:256
+; GFX11-NEXT: scratch_load_b32 v46, off, s32 offset:260
+; GFX11-NEXT: scratch_load_b32 v45, off, s32 offset:264
+; GFX11-NEXT: scratch_load_b32 v44, off, s32 offset:268
+; GFX11-NEXT: scratch_load_b32 v43, off, s32 offset:272
+; GFX11-NEXT: scratch_load_b32 v42, off, s32 offset:276
+; GFX11-NEXT: scratch_load_b32 v41, off, s32 offset:280
+; GFX11-NEXT: scratch_load_b32 v40, off, s32 offset:284
+; GFX11-NEXT: v_dual_mov_b32 v2, v50 :: v_dual_mov_b32 v5, v65
; GFX11-NEXT: v_dual_mov_b32 v25, v182 :: v_dual_mov_b32 v26, v181
; GFX11-NEXT: v_dual_mov_b32 v27, v180 :: v_dual_mov_b32 v28, v179
; GFX11-NEXT: v_dual_mov_b32 v29, v178 :: v_dual_mov_b32 v30, v177
@@ -143062,23 +143611,25 @@ define inreg <16 x double> @bitcast_v64i16_to_v16f64_scalar(<64 x i16> inreg %a,
; GFX11-NEXT: s_waitcnt vmcnt(0)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB87_4:
-; GFX11-NEXT: ; implicit-def: $vgpr47_vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78
; GFX11-NEXT: ; implicit-def: $vgpr48_vgpr49_vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-NEXT: ; implicit-def: $vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46
-; GFX11-NEXT: ; implicit-def: $vgpr50_vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81
-; GFX11-NEXT: ; implicit-def: $vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84
-; GFX11-NEXT: ; implicit-def: $vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88
-; GFX11-NEXT: ; implicit-def: $vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93
-; GFX11-NEXT: ; implicit-def: $vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99
-; GFX11-NEXT: ; implicit-def: $vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106
-; GFX11-NEXT: ; implicit-def: $vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114
-; GFX11-NEXT: ; implicit-def: $vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123
-; GFX11-NEXT: ; implicit-def: $vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133
-; GFX11-NEXT: ; implicit-def: $vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144
-; GFX11-NEXT: ; implicit-def: $vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156
-; GFX11-NEXT: ; implicit-def: $vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167_vgpr168_vgpr169
-; GFX11-NEXT: s_branch .LBB87_2
+; GFX11-NEXT: ; implicit-def: $vgpr51_vgpr52_vgpr53_vgpr54_vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82
+; GFX11-NEXT: ; implicit-def: $vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32
+; GFX11-NEXT: ; implicit-def: $vgpr55_vgpr56_vgpr57_vgpr58_vgpr59_vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86
+; GFX11-NEXT: ; implicit-def: $vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34
+; GFX11-NEXT: ; implicit-def: $vgpr60_vgpr61_vgpr62_vgpr63_vgpr64_vgpr65_vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91
+; GFX11-NEXT: ; implicit-def: $vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31_vgpr32_vgpr33_vgpr34_vgpr35_vgpr36_vgpr37_vgpr38_vgpr39_vgpr40_vgpr41_vgpr42_vgpr43_vgpr44_vgpr45_vgpr46_vgpr47_vgpr48_vgpr49
+; GFX11-NEXT: ; implicit-def: $vgpr66_vgpr67_vgpr68_vgpr69_vgpr70_vgpr71_vgpr72_vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97
+; GFX11-NEXT: ; implicit-def: $vgpr73_vgpr74_vgpr75_vgpr76_vgpr77_vgpr78_vgpr79_vgpr80_vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104
+; GFX11-NEXT: ; implicit-def: $vgpr81_vgpr82_vgpr83_vgpr84_vgpr85_vgpr86_vgpr87_vgpr88_vgpr89_vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112
+; GFX11-NEXT: ; implicit-def: $vgpr90_vgpr91_vgpr92_vgpr93_vgpr94_vgpr95_vgpr96_vgpr97_vgpr98_vgpr99_vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121
+; GFX11-NEXT: ; implicit-def: $vgpr100_vgpr101_vgpr102_vgpr103_vgpr104_vgpr105_vgpr106_vgpr107_vgpr108_vgpr109_vgpr110_vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131
+; GFX11-NEXT: ; implicit-def: $vgpr111_vgpr112_vgpr113_vgpr114_vgpr115_vgpr116_vgpr117_vgpr118_vgpr119_vgpr120_vgpr121_vgpr122_vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142
+; GFX11-NEXT: ; implicit-def: $vgpr123_vgpr124_vgpr125_vgpr126_vgpr127_vgpr128_vgpr129_vgpr130_vgpr131_vgpr132_vgpr133_vgpr134_vgpr135_vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154
+; GFX11-NEXT: ; implicit-def: $vgpr136_vgpr137_vgpr138_vgpr139_vgpr140_vgpr141_vgpr142_vgpr143_vgpr144_vgpr145_vgpr146_vgpr147_vgpr148_vgpr149_vgpr150_vgpr151_vgpr152_vgpr153_vgpr154_vgpr155_vgpr156_vgpr157_vgpr158_vgpr159_vgpr160_vgpr161_vgpr162_vgpr163_vgpr164_vgpr165_vgpr166_vgpr167
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB87_2
+; GFX11-NEXT: s_branch .LBB87_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -149433,13 +149984,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:308
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:304
; SI-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
-; SI-NEXT: s_mov_b32 s72, s21
+; SI-NEXT: s_mov_b32 s76, s28
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_writelane_b32 v43, s19, 0
; SI-NEXT: v_writelane_b32 v43, s18, 1
; SI-NEXT: v_writelane_b32 v43, s17, 2
; SI-NEXT: v_writelane_b32 v43, s16, 3
-; SI-NEXT: s_mov_b32 s60, s24
+; SI-NEXT: s_mov_b32 s72, s21
+; SI-NEXT: s_mov_b32 s79, s24
; SI-NEXT: v_writelane_b32 v41, s30, 0
; SI-NEXT: v_writelane_b32 v41, s31, 1
; SI-NEXT: v_writelane_b32 v41, s34, 2
@@ -149464,8 +150016,8 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v41, s69, 21
; SI-NEXT: v_writelane_b32 v41, s70, 22
; SI-NEXT: v_writelane_b32 v41, s71, 23
-; SI-NEXT: s_mov_b32 s77, s28
-; SI-NEXT: s_mov_b32 s76, s27
+; SI-NEXT: s_mov_b32 s60, s29
+; SI-NEXT: s_mov_b32 s73, s27
; SI-NEXT: v_writelane_b32 v41, s80, 24
; SI-NEXT: v_writelane_b32 v41, s81, 25
; SI-NEXT: v_writelane_b32 v41, s82, 26
@@ -149478,7 +150030,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v41, s97, 33
; SI-NEXT: v_writelane_b32 v41, s98, 34
; SI-NEXT: v_writelane_b32 v41, s99, 35
-; SI-NEXT: s_mov_b32 s79, s26
+; SI-NEXT: s_mov_b32 s77, s26
; SI-NEXT: v_readfirstlane_b32 s38, v20
; SI-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
; SI-NEXT: v_readfirstlane_b32 s39, v19
@@ -149506,7 +150058,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_readfirstlane_b32 s19, v6
; SI-NEXT: v_readfirstlane_b32 s88, v4
; SI-NEXT: v_readfirstlane_b32 s89, v3
-; SI-NEXT: v_readfirstlane_b32 s90, v9
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_readfirstlane_b32 s6, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:300
@@ -149542,6 +150093,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s4, v38
; SI-NEXT: v_writelane_b32 v43, s4, 10
+; SI-NEXT: v_readfirstlane_b32 s90, v9
; SI-NEXT: v_readfirstlane_b32 s91, v10
; SI-NEXT: v_readfirstlane_b32 s92, v8
; SI-NEXT: v_readfirstlane_b32 s93, v7
@@ -149600,17 +150152,17 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s59, v31
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_readfirstlane_b32 s42, v38
+; SI-NEXT: v_readfirstlane_b32 s78, v38
; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_readfirstlane_b32 s73, v39
+; SI-NEXT: v_readfirstlane_b32 s45, v39
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_readfirstlane_b32 s21, v48
+; SI-NEXT: v_readfirstlane_b32 s28, v48
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_readfirstlane_b32 s57, v49
+; SI-NEXT: v_readfirstlane_b32 s42, v49
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_readfirstlane_b32 s13, v50
; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_readfirstlane_b32 s45, v51
+; SI-NEXT: v_readfirstlane_b32 s21, v51
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:200
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:196
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:192
@@ -149621,7 +150173,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s47, v32
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_readfirstlane_b32 s24, v33
+; SI-NEXT: v_readfirstlane_b32 s46, v33
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:172
; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:168
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:164
@@ -149630,7 +150182,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:152
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s78, v34
+; SI-NEXT: v_readfirstlane_b32 s24, v34
; SI-NEXT: v_readfirstlane_b32 s4, v35
; SI-NEXT: v_writelane_b32 v43, s4, 18
; SI-NEXT: v_readfirstlane_b32 s4, v36
@@ -149691,12 +150243,12 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v43, s23, 35
; SI-NEXT: v_writelane_b32 v43, s72, 36
; SI-NEXT: v_writelane_b32 v43, s20, 37
-; SI-NEXT: v_writelane_b32 v43, s79, 38
-; SI-NEXT: v_writelane_b32 v43, s76, 39
+; SI-NEXT: v_writelane_b32 v43, s77, 38
+; SI-NEXT: v_writelane_b32 v43, s73, 39
; SI-NEXT: v_writelane_b32 v43, s25, 40
-; SI-NEXT: v_writelane_b32 v43, s60, 41
-; SI-NEXT: v_writelane_b32 v43, s29, 42
-; SI-NEXT: v_writelane_b32 v43, s77, 43
+; SI-NEXT: v_writelane_b32 v43, s79, 41
+; SI-NEXT: v_writelane_b32 v43, s60, 42
+; SI-NEXT: v_writelane_b32 v43, s76, 43
; SI-NEXT: v_writelane_b32 v43, s16, 44
; SI-NEXT: v_writelane_b32 v43, s17, 45
; SI-NEXT: v_writelane_b32 v43, s18, 46
@@ -149710,15 +150262,15 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v43, s94, 54
; SI-NEXT: v_writelane_b32 v43, s95, 55
; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_readfirstlane_b32 s62, v33
+; SI-NEXT: v_readfirstlane_b32 s74, v33
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_readfirstlane_b32 s10, v34
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_readfirstlane_b32 s66, v35
-; SI-NEXT: v_readfirstlane_b32 s28, v31
+; SI-NEXT: v_readfirstlane_b32 s57, v31
; SI-NEXT: v_readfirstlane_b32 s27, v32
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_readfirstlane_b32 s58, v36
+; SI-NEXT: v_readfirstlane_b32 s29, v36
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_readfirstlane_b32 s69, v37
; SI-NEXT: s_waitcnt vmcnt(5)
@@ -149755,12 +150307,13 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v43, s31, 59
; SI-NEXT: v_writelane_b32 v43, s34, 60
; SI-NEXT: v_writelane_b32 v43, s35, 61
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_writelane_b32 v43, s36, 62
; SI-NEXT: v_writelane_b32 v43, s37, 63
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_readfirstlane_b32 s74, v31
+; SI-NEXT: v_readfirstlane_b32 s62, v31
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_readfirstlane_b32 s46, v32
+; SI-NEXT: v_readfirstlane_b32 s58, v32
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_readfirstlane_b32 s96, v33
; SI-NEXT: s_waitcnt vmcnt(9)
@@ -149851,17 +150404,16 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_writelane_b32 v42, s71, 36
; SI-NEXT: v_writelane_b32 v42, s70, 37
; SI-NEXT: v_writelane_b32 v42, s68, 38
-; SI-NEXT: v_writelane_b32 v42, s74, 39
-; SI-NEXT: v_writelane_b32 v42, s46, 40
+; SI-NEXT: v_writelane_b32 v42, s62, 39
+; SI-NEXT: v_writelane_b32 v42, s58, 40
; SI-NEXT: v_writelane_b32 v42, s11, 41
; SI-NEXT: v_writelane_b32 v42, s10, 42
-; SI-NEXT: v_writelane_b32 v42, s62, 43
+; SI-NEXT: v_writelane_b32 v42, s74, 43
; SI-NEXT: v_writelane_b32 v42, s66, 44
-; SI-NEXT: v_writelane_b32 v42, s58, 45
-; SI-NEXT: v_writelane_b32 v42, s28, 46
+; SI-NEXT: v_writelane_b32 v42, s29, 45
+; SI-NEXT: v_writelane_b32 v42, s57, 46
; SI-NEXT: v_writelane_b32 v42, s27, 47
-; SI-NEXT: v_writelane_b32 v42, s78, 48
-; SI-NEXT: v_writelane_b32 v42, s24, 49
+; SI-NEXT: v_writelane_b32 v42, s24, 48
; SI-NEXT: s_cbranch_scc0 .LBB89_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_readlane_b32 s4, v43, 3
@@ -149870,14 +150422,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s4, s4, 16
; SI-NEXT: s_lshl_b32 s5, s5, 24
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_writelane_b32 v42, s4, 56
+; SI-NEXT: v_writelane_b32 v42, s4, 58
; SI-NEXT: v_readlane_b32 s4, v43, 1
; SI-NEXT: s_and_b32 s4, s4, 0xff
; SI-NEXT: v_readlane_b32 s5, v43, 0
; SI-NEXT: s_lshl_b32 s4, s4, 16
; SI-NEXT: s_lshl_b32 s5, s5, 24
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_writelane_b32 v42, s4, 57
+; SI-NEXT: v_writelane_b32 v42, s4, 59
; SI-NEXT: s_and_b32 s4, s20, 0xff
; SI-NEXT: s_lshl_b32 s5, s72, 8
; SI-NEXT: s_or_b32 s4, s4, s5
@@ -149885,32 +150437,29 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s5, s5, 16
; SI-NEXT: s_mov_b32 s22, s6
; SI-NEXT: s_lshl_b32 s6, s23, 24
-; SI-NEXT: v_writelane_b32 v42, s4, 58
+; SI-NEXT: v_writelane_b32 v42, s4, 60
; SI-NEXT: s_or_b32 s4, s6, s5
-; SI-NEXT: s_and_b32 s5, s60, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s6, s25, 24
-; SI-NEXT: v_writelane_b32 v42, s4, 59
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: v_writelane_b32 v42, s5, 60
; SI-NEXT: s_and_b32 s5, s79, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s6, s76, 24
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: v_writelane_b32 v42, s5, 61
+; SI-NEXT: s_lshl_b32 s6, s25, 24
+; SI-NEXT: v_writelane_b32 v42, s4, 61
+; SI-NEXT: s_or_b32 s4, s6, s5
; SI-NEXT: s_and_b32 s5, s77, 0xff
-; SI-NEXT: s_lshl_b32 s6, s29, 8
+; SI-NEXT: s_lshl_b32 s5, s5, 16
+; SI-NEXT: s_lshl_b32 s6, s73, 24
+; SI-NEXT: s_or_b32 s72, s6, s5
+; SI-NEXT: s_and_b32 s5, s76, 0xff
+; SI-NEXT: s_lshl_b32 s6, s60, 8
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s16, 0xff
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_lshl_b32 s16, s17, 24
-; SI-NEXT: s_or_b32 s6, s16, s6
-; SI-NEXT: v_writelane_b32 v42, s6, 62
+; SI-NEXT: s_or_b32 s23, s16, s6
; SI-NEXT: s_and_b32 s6, s89, 0xff
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_lshl_b32 s16, s88, 24
-; SI-NEXT: s_mov_b32 s4, s47
-; SI-NEXT: s_or_b32 s47, s16, s6
+; SI-NEXT: v_writelane_b32 v42, s4, 62
+; SI-NEXT: s_or_b32 s4, s16, s6
; SI-NEXT: s_and_b32 s6, s18, 0xff
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_lshl_b32 s16, s19, 24
@@ -149951,11 +150500,11 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_and_b32 s18, s48, 0xff
; SI-NEXT: s_lshl_b32 s18, s18, 16
; SI-NEXT: s_lshl_b32 s19, s49, 24
-; SI-NEXT: s_or_b32 s89, s19, s18
+; SI-NEXT: s_or_b32 s60, s19, s18
; SI-NEXT: s_and_b32 s18, s55, 0xff
; SI-NEXT: s_lshl_b32 s18, s18, 16
; SI-NEXT: s_lshl_b32 s19, s54, 24
-; SI-NEXT: s_or_b32 s31, s19, s18
+; SI-NEXT: s_or_b32 s89, s19, s18
; SI-NEXT: s_and_b32 s18, s52, 0xff
; SI-NEXT: s_lshl_b32 s18, s18, 16
; SI-NEXT: s_lshl_b32 s19, s53, 24
@@ -149966,7 +150515,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_and_b32 s19, s64, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s65, 24
-; SI-NEXT: s_or_b32 s60, s20, s19
+; SI-NEXT: s_or_b32 s73, s20, s19
; SI-NEXT: s_and_b32 s19, s12, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s8, 24
@@ -149987,17 +150536,18 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s20, s7, 24
; SI-NEXT: s_or_b32 s7, s20, s19
; SI-NEXT: s_and_b32 s19, s82, 0xff
+; SI-NEXT: v_writelane_b32 v42, s9, 49
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s83, 24
-; SI-NEXT: s_or_b32 s23, s20, s19
+; SI-NEXT: v_writelane_b32 v42, s7, 51
+; SI-NEXT: s_or_b32 s7, s20, s19
; SI-NEXT: s_and_b32 s19, s26, 0xff
; SI-NEXT: s_lshl_b32 s20, s81, 8
; SI-NEXT: s_or_b32 vcc_hi, s19, s20
; SI-NEXT: s_and_b32 s19, s99, 0xff
-; SI-NEXT: v_writelane_b32 v42, s9, 50
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s87, 24
-; SI-NEXT: v_writelane_b32 v42, s7, 51
+; SI-NEXT: v_writelane_b32 v42, s7, 50
; SI-NEXT: s_or_b32 s7, s20, s19
; SI-NEXT: s_and_b32 s19, s56, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
@@ -150009,97 +150559,99 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s20, s96, 24
; SI-NEXT: v_writelane_b32 v42, s7, 54
; SI-NEXT: s_or_b32 s7, s20, s19
-; SI-NEXT: s_and_b32 s19, s46, 0xff
-; SI-NEXT: s_lshl_b32 s20, s74, 8
+; SI-NEXT: s_and_b32 s19, s58, 0xff
+; SI-NEXT: s_lshl_b32 s20, s62, 8
; SI-NEXT: s_or_b32 s84, s19, s20
; SI-NEXT: s_and_b32 s19, s71, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s70, 24
-; SI-NEXT: s_or_b32 s72, s20, s19
+; SI-NEXT: v_writelane_b32 v42, s7, 53
+; SI-NEXT: s_or_b32 s7, s20, s19
; SI-NEXT: s_and_b32 s19, s11, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s68, 24
-; SI-NEXT: v_writelane_b32 v42, s7, 53
+; SI-NEXT: v_writelane_b32 v42, s7, 55
; SI-NEXT: s_or_b32 s7, s20, s19
; SI-NEXT: s_and_b32 s19, s14, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s69, 24
; SI-NEXT: s_or_b32 s9, s20, s19
-; SI-NEXT: s_and_b32 s19, s58, 0xff
+; SI-NEXT: s_and_b32 s19, s29, 0xff
; SI-NEXT: s_lshl_b32 s20, s66, 8
; SI-NEXT: s_or_b32 s85, s19, s20
; SI-NEXT: s_and_b32 s19, s10, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s62, 24
-; SI-NEXT: s_or_b32 s49, s20, s19
+; SI-NEXT: s_lshl_b32 s20, s74, 24
+; SI-NEXT: v_writelane_b32 v42, s9, 56
+; SI-NEXT: s_or_b32 s9, s20, s19
; SI-NEXT: s_and_b32 s19, s27, 0xff
-; SI-NEXT: v_writelane_b32 v42, s9, 55
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s28, 24
-; SI-NEXT: v_readlane_b32 s9, v43, 33
+; SI-NEXT: s_lshl_b32 s20, s57, 24
+; SI-NEXT: s_mov_b32 s57, s7
+; SI-NEXT: v_readlane_b32 s7, v43, 33
; SI-NEXT: s_or_b32 s50, s20, s19
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 32
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 32
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s9, 24
-; SI-NEXT: v_readlane_b32 s9, v43, 31
+; SI-NEXT: s_lshl_b32 s20, s7, 24
+; SI-NEXT: v_readlane_b32 s7, v43, 31
; SI-NEXT: s_or_b32 s51, s20, s19
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 30
-; SI-NEXT: s_lshl_b32 s20, s9, 8
-; SI-NEXT: v_readlane_b32 s9, v43, 29
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 30
+; SI-NEXT: s_lshl_b32 s20, s7, 8
+; SI-NEXT: v_readlane_b32 s7, v43, 29
; SI-NEXT: s_or_b32 s86, s19, s20
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 28
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 28
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s9, 24
-; SI-NEXT: v_readlane_b32 s9, v43, 27
+; SI-NEXT: s_lshl_b32 s20, s7, 24
+; SI-NEXT: v_readlane_b32 s7, v43, 27
; SI-NEXT: s_or_b32 s52, s20, s19
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 26
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 26
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s9, 24
-; SI-NEXT: v_readlane_b32 s9, v43, 25
+; SI-NEXT: s_lshl_b32 s20, s7, 24
+; SI-NEXT: v_readlane_b32 s7, v43, 25
; SI-NEXT: s_or_b32 s53, s20, s19
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 24
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 24
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s9, 24
-; SI-NEXT: v_readlane_b32 s9, v43, 23
+; SI-NEXT: s_lshl_b32 s20, s7, 24
+; SI-NEXT: v_readlane_b32 s7, v43, 23
; SI-NEXT: s_or_b32 s54, s20, s19
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 22
-; SI-NEXT: s_lshl_b32 s20, s9, 8
-; SI-NEXT: v_readlane_b32 s9, v43, 21
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 22
+; SI-NEXT: s_lshl_b32 s20, s7, 8
+; SI-NEXT: v_readlane_b32 s7, v43, 21
; SI-NEXT: s_or_b32 s87, s19, s20
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 20
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 20
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s9, 24
-; SI-NEXT: v_readlane_b32 s9, v43, 19
+; SI-NEXT: s_lshl_b32 s20, s7, 24
+; SI-NEXT: v_readlane_b32 s7, v43, 19
; SI-NEXT: s_or_b32 s55, s20, s19
-; SI-NEXT: s_mov_b32 s58, s9
-; SI-NEXT: s_and_b32 s19, s9, 0xff
-; SI-NEXT: v_readlane_b32 s9, v43, 18
+; SI-NEXT: s_mov_b32 s90, s7
+; SI-NEXT: s_and_b32 s19, s7, 0xff
+; SI-NEXT: v_readlane_b32 s7, v43, 18
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s9, 24
+; SI-NEXT: s_lshl_b32 s20, s7, 24
; SI-NEXT: s_or_b32 s64, s20, s19
-; SI-NEXT: s_and_b32 s19, s78, 0xff
+; SI-NEXT: s_and_b32 s19, s24, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s24, 24
+; SI-NEXT: s_lshl_b32 s20, s46, 24
; SI-NEXT: s_or_b32 s65, s20, s19
-; SI-NEXT: s_and_b32 s19, s4, 0xff
-; SI-NEXT: s_lshl_b32 s20, s45, 8
+; SI-NEXT: s_and_b32 s19, s47, 0xff
+; SI-NEXT: s_lshl_b32 s20, s21, 8
; SI-NEXT: s_or_b32 s26, s19, s20
; SI-NEXT: s_and_b32 s19, s13, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s57, 24
+; SI-NEXT: s_lshl_b32 s20, s42, 24
; SI-NEXT: s_or_b32 s66, s20, s19
-; SI-NEXT: s_and_b32 s19, s21, 0xff
+; SI-NEXT: s_and_b32 s19, s28, 0xff
; SI-NEXT: s_lshl_b32 s19, s19, 16
-; SI-NEXT: s_lshl_b32 s20, s73, 24
+; SI-NEXT: s_lshl_b32 s20, s45, 24
; SI-NEXT: s_or_b32 s67, s20, s19
-; SI-NEXT: s_and_b32 s19, s42, 0xff
+; SI-NEXT: s_and_b32 s19, s78, 0xff
; SI-NEXT: v_readlane_b32 s88, v43, 17
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s59, 24
@@ -150112,8 +150664,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s20, s43, 24
; SI-NEXT: s_or_b32 s69, s20, s19
; SI-NEXT: s_and_b32 s19, s61, 0xff
-; SI-NEXT: s_mov_b32 s39, s57
-; SI-NEXT: s_mov_b32 s57, s7
+; SI-NEXT: s_mov_b32 s35, s7
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s75, 24
; SI-NEXT: v_readlane_b32 s7, v43, 16
@@ -150136,6 +150687,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_mov_b32 s14, s7
; SI-NEXT: s_and_b32 s19, s7, 0xff
; SI-NEXT: v_readlane_b32 s7, v43, 11
+; SI-NEXT: v_writelane_b32 v42, s9, 57
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_mov_b32 s9, s7
; SI-NEXT: s_lshl_b32 s20, s7, 24
@@ -150156,11 +150708,8 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_mov_b32 s96, s7
; SI-NEXT: s_lshl_b32 s20, s7, 24
; SI-NEXT: v_readlane_b32 s7, v43, 6
-; SI-NEXT: s_mov_b32 s36, s63
-; SI-NEXT: s_mov_b32 s63, s93
-; SI-NEXT: s_mov_b32 s93, s61
-; SI-NEXT: s_mov_b32 s61, s91
-; SI-NEXT: s_mov_b32 s91, s75
+; SI-NEXT: s_mov_b32 s30, s88
+; SI-NEXT: s_mov_b32 s88, s75
; SI-NEXT: s_mov_b32 s75, s92
; SI-NEXT: s_or_b32 s92, s20, s19
; SI-NEXT: s_mov_b32 s98, s7
@@ -150169,29 +150718,33 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_mov_b32 s44, s7
; SI-NEXT: s_lshl_b32 s20, s7, 8
; SI-NEXT: v_readlane_b32 s7, v43, 4
-; SI-NEXT: s_mov_b32 s48, s13
-; SI-NEXT: s_mov_b32 s13, s94
-; SI-NEXT: s_mov_b32 s94, s21
+; SI-NEXT: s_mov_b32 s49, s21
; SI-NEXT: s_or_b32 s21, s19, s20
; SI-NEXT: s_and_b32 s19, s7, 0xff
-; SI-NEXT: s_mov_b32 s95, s4
+; SI-NEXT: s_mov_b32 s39, s59
+; SI-NEXT: s_mov_b32 s59, s8
; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_lshl_b32 s20, s22, 24
-; SI-NEXT: v_readlane_b32 s4, v42, 58
-; SI-NEXT: s_mov_b32 s46, s45
-; SI-NEXT: s_mov_b32 s34, s73
-; SI-NEXT: s_mov_b32 s73, s12
-; SI-NEXT: s_mov_b32 s37, s42
-; SI-NEXT: s_mov_b32 s38, s59
-; SI-NEXT: s_mov_b32 s59, s8
-; SI-NEXT: s_mov_b32 s30, s88
-; SI-NEXT: s_mov_b32 s88, s31
+; SI-NEXT: v_readlane_b32 s8, v42, 60
+; SI-NEXT: s_mov_b32 s58, s46
+; SI-NEXT: s_mov_b32 s95, s47
+; SI-NEXT: s_mov_b32 s34, s13
+; SI-NEXT: s_mov_b32 s13, s94
+; SI-NEXT: s_mov_b32 s48, s42
+; SI-NEXT: s_mov_b32 s94, s28
+; SI-NEXT: s_mov_b32 s38, s45
+; SI-NEXT: s_mov_b32 s37, s78
+; SI-NEXT: s_mov_b32 s36, s63
+; SI-NEXT: s_mov_b32 s63, s93
; SI-NEXT: s_mov_b32 s78, s40
; SI-NEXT: s_mov_b32 s31, s43
+; SI-NEXT: s_mov_b32 s93, s61
+; SI-NEXT: s_mov_b32 s61, s91
+; SI-NEXT: s_mov_b32 s91, s12
; SI-NEXT: s_mov_b32 s12, s7
; SI-NEXT: s_mov_b32 s7, s22
; SI-NEXT: s_or_b32 s83, s20, s19
-; SI-NEXT: s_lshl_b32 s20, s4, 16
+; SI-NEXT: s_lshl_b32 s20, s8, 16
; SI-NEXT: s_lshl_b32 s74, s5, 16
; SI-NEXT: s_lshl_b32 s22, s6, 16
; SI-NEXT: s_lshl_b32 s16, s16, 16
@@ -150204,16 +150757,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s97, s86, 16
; SI-NEXT: s_lshl_b32 s28, s87, 16
; SI-NEXT: s_lshl_b32 s87, s26, 16
-; SI-NEXT: v_readlane_b32 s26, v42, 56
+; SI-NEXT: v_readlane_b32 s26, v42, 58
; SI-NEXT: s_lshl_b32 s86, s27, 16
-; SI-NEXT: v_readlane_b32 s27, v42, 57
-; SI-NEXT: v_readlane_b32 s35, v42, 61
+; SI-NEXT: v_readlane_b32 s27, v42, 59
; SI-NEXT: s_lshl_b32 s85, s29, 16
-; SI-NEXT: v_readlane_b32 s29, v42, 60
-; SI-NEXT: v_readlane_b32 s24, v42, 59
-; SI-NEXT: v_readlane_b32 s90, v42, 62
+; SI-NEXT: v_readlane_b32 s29, v42, 62
+; SI-NEXT: v_readlane_b32 s24, v42, 61
; SI-NEXT: s_lshl_b32 s84, s21, 16
-; SI-NEXT: s_mov_b32 s21, s47
+; SI-NEXT: s_mov_b32 s21, s4
; SI-NEXT: s_cbranch_execnz .LBB89_3
; SI-NEXT: .LBB89_2: ; %cmp.true
; SI-NEXT: s_add_i32 s4, s98, 3
@@ -150254,7 +150805,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_or_b32 s6, s16, s6
; SI-NEXT: s_add_i32 s16, s93, 3
; SI-NEXT: s_and_b32 s16, s16, 0xff
-; SI-NEXT: s_lshl_b32 s17, s91, 8
+; SI-NEXT: s_lshl_b32 s17, s88, 8
; SI-NEXT: s_add_i32 s18, s10, 3
; SI-NEXT: s_or_b32 s16, s17, s16
; SI-NEXT: s_and_b32 s18, s18, 0xff
@@ -150278,11 +150829,11 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_or_b32 s17, s18, s17
; SI-NEXT: s_add_i32 s18, s94, 3
; SI-NEXT: s_and_b32 s18, s18, 0xff
-; SI-NEXT: s_lshl_b32 s19, s34, 8
+; SI-NEXT: s_lshl_b32 s19, s38, 8
; SI-NEXT: s_add_i32 s20, s37, 3
; SI-NEXT: s_or_b32 s18, s19, s18
; SI-NEXT: s_and_b32 s20, s20, 0xff
-; SI-NEXT: s_lshl_b32 s19, s38, 24
+; SI-NEXT: s_lshl_b32 s19, s39, 24
; SI-NEXT: s_lshl_b32 s20, s20, 16
; SI-NEXT: s_addk_i32 s18, 0x300
; SI-NEXT: s_or_b32 s19, s19, s20
@@ -150290,26 +150841,24 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_or_b32 s18, s19, s18
; SI-NEXT: s_add_i32 s19, s95, 3
; SI-NEXT: s_and_b32 s19, s19, 0xff
-; SI-NEXT: s_lshl_b32 s20, s46, 8
-; SI-NEXT: s_add_i32 s22, s48, 3
+; SI-NEXT: s_lshl_b32 s20, s49, 8
+; SI-NEXT: s_add_i32 s22, s34, 3
; SI-NEXT: s_or_b32 s19, s20, s19
; SI-NEXT: s_and_b32 s22, s22, 0xff
-; SI-NEXT: s_lshl_b32 s20, s39, 24
+; SI-NEXT: s_lshl_b32 s20, s48, 24
; SI-NEXT: s_lshl_b32 s22, s22, 16
; SI-NEXT: s_addk_i32 s19, 0x300
; SI-NEXT: s_or_b32 s20, s20, s22
; SI-NEXT: s_and_b32 s19, s19, 0xffff
; SI-NEXT: s_or_b32 s19, s20, s19
-; SI-NEXT: s_add_i32 s20, s58, 3
-; SI-NEXT: v_readlane_b32 s7, v43, 18
-; SI-NEXT: s_and_b32 s20, s20, 0xff
-; SI-NEXT: s_lshl_b32 s22, s7, 8
-; SI-NEXT: v_readlane_b32 s7, v42, 49
-; SI-NEXT: s_or_b32 s20, s22, s20
-; SI-NEXT: s_lshl_b32 s22, s7, 24
+; SI-NEXT: s_add_i32 s20, s90, 3
; SI-NEXT: v_readlane_b32 s7, v42, 48
+; SI-NEXT: s_and_b32 s20, s20, 0xff
+; SI-NEXT: s_lshl_b32 s22, s35, 8
; SI-NEXT: s_add_i32 s23, s7, 3
+; SI-NEXT: s_or_b32 s20, s22, s20
; SI-NEXT: s_and_b32 s23, s23, 0xff
+; SI-NEXT: s_lshl_b32 s22, s58, 24
; SI-NEXT: s_lshl_b32 s23, s23, 16
; SI-NEXT: s_addk_i32 s20, 0x300
; SI-NEXT: s_or_b32 s22, s22, s23
@@ -150680,61 +151229,67 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_and_b32 s20, s20, 0xff
; SI-NEXT: s_lshl_b32 s24, s24, 8
; SI-NEXT: s_add_i32 s25, s25, 3
+; SI-NEXT: s_and_b32 s46, s46, 0xff
; SI-NEXT: s_or_b32 s20, s24, s20
; SI-NEXT: v_readlane_b32 s24, v43, 35
; SI-NEXT: s_and_b32 s25, s25, 0xff
+; SI-NEXT: s_lshl_b32 s46, s46, 16
+; SI-NEXT: s_addk_i32 s56, 0x300
; SI-NEXT: s_addk_i32 s20, 0x300
; SI-NEXT: s_lshl_b32 s24, s24, 24
; SI-NEXT: s_lshl_b32 s25, s25, 16
-; SI-NEXT: s_and_b32 s20, s20, 0xffff
-; SI-NEXT: s_or_b32 s24, s24, s25
-; SI-NEXT: s_and_b32 s46, s46, 0xff
-; SI-NEXT: s_or_b32 s20, s24, s20
-; SI-NEXT: v_readlane_b32 s24, v43, 3
-; SI-NEXT: s_lshl_b32 s46, s46, 16
-; SI-NEXT: s_addk_i32 s56, 0x300
-; SI-NEXT: s_add_i32 s24, s24, 3
-; SI-NEXT: v_readlane_b32 s25, v43, 2
-; SI-NEXT: v_readlane_b32 s26, v43, 1
; SI-NEXT: s_or_b32 s46, s47, s46
; SI-NEXT: s_and_b32 s47, s56, 0xffff
; SI-NEXT: s_add_i32 s7, s7, 0x3000000
; SI-NEXT: s_add_i32 s9, s9, 0x3000000
-; SI-NEXT: s_and_b32 s24, s24, 0xff
-; SI-NEXT: s_lshl_b32 s25, s25, 8
-; SI-NEXT: s_add_i32 s26, s26, 3
+; SI-NEXT: s_and_b32 s20, s20, 0xffff
+; SI-NEXT: s_or_b32 s24, s24, s25
; SI-NEXT: s_or_b32 s56, s46, s47
; SI-NEXT: s_add_i32 s47, s58, 0x3000000
; SI-NEXT: s_add_i32 s58, s59, 0x3000000
; SI-NEXT: s_add_i32 s10, s10, 0x3000000
-; SI-NEXT: s_or_b32 s24, s25, s24
-; SI-NEXT: v_readlane_b32 s25, v43, 0
-; SI-NEXT: s_and_b32 s26, s26, 0xff
-; SI-NEXT: s_and_b32 s73, s9, 0xffff0000
+; SI-NEXT: s_or_b32 s20, s24, s20
+; SI-NEXT: v_readlane_b32 s24, v43, 3
+; SI-NEXT: s_and_b32 s91, s9, 0xffff0000
; SI-NEXT: s_lshl_b32 s59, s9, 16
; SI-NEXT: s_and_b32 s9, s7, 0xffff0000
-; SI-NEXT: s_add_i32 s6, s6, 0x3000000
-; SI-NEXT: s_addk_i32 s24, 0x300
-; SI-NEXT: s_lshl_b32 s25, s25, 24
-; SI-NEXT: s_lshl_b32 s26, s26, 16
+; SI-NEXT: s_add_i32 s24, s24, 3
+; SI-NEXT: v_readlane_b32 s25, v43, 2
+; SI-NEXT: v_readlane_b32 s26, v43, 1
; SI-NEXT: s_and_b32 s63, s17, 0xffff0000
; SI-NEXT: s_lshl_b32 s79, s17, 16
-; SI-NEXT: v_writelane_b32 v42, s9, 50
+; SI-NEXT: v_writelane_b32 v42, s9, 49
; SI-NEXT: s_lshl_b32 s17, s7, 16
+; SI-NEXT: s_and_b32 s7, s10, 0xffff0000
+; SI-NEXT: s_add_i32 s6, s6, 0x3000000
+; SI-NEXT: s_and_b32 s24, s24, 0xff
+; SI-NEXT: s_lshl_b32 s25, s25, 8
+; SI-NEXT: s_add_i32 s26, s26, 3
+; SI-NEXT: v_writelane_b32 v42, s7, 50
; SI-NEXT: s_lshl_b32 s7, s10, 16
; SI-NEXT: s_add_i32 s8, s8, 0x3000000
-; SI-NEXT: s_and_b32 s24, s24, 0xffff
-; SI-NEXT: s_or_b32 s25, s25, s26
+; SI-NEXT: s_or_b32 s24, s25, s24
+; SI-NEXT: v_readlane_b32 s25, v43, 0
+; SI-NEXT: s_and_b32 s26, s26, 0xff
; SI-NEXT: v_writelane_b32 v42, s7, 51
; SI-NEXT: s_and_b32 s7, s6, 0xffff0000
-; SI-NEXT: s_or_b32 s24, s25, s24
+; SI-NEXT: s_addk_i32 s24, 0x300
+; SI-NEXT: s_lshl_b32 s25, s25, 24
+; SI-NEXT: s_lshl_b32 s26, s26, 16
; SI-NEXT: v_writelane_b32 v42, s7, 52
; SI-NEXT: s_and_b32 s7, s8, 0xffff0000
+; SI-NEXT: s_and_b32 s24, s24, 0xffff
+; SI-NEXT: s_or_b32 s25, s25, s26
+; SI-NEXT: v_writelane_b32 v42, s7, 53
+; SI-NEXT: s_lshl_b32 s7, s8, 16
+; SI-NEXT: s_add_i32 s57, s57, 0x3000000
+; SI-NEXT: s_or_b32 s24, s25, s24
+; SI-NEXT: v_writelane_b32 v42, s7, 54
+; SI-NEXT: s_and_b32 s7, s58, 0xffff0000
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
; SI-NEXT: s_add_i32 s5, s5, 0x3000000
; SI-NEXT: s_add_i32 s46, s60, 0x3000000
; SI-NEXT: s_add_i32 s56, s56, 0x3000000
-; SI-NEXT: s_add_i32 s57, s57, 0x3000000
; SI-NEXT: s_add_i32 s11, s11, 0x3000000
; SI-NEXT: s_add_i32 s12, s12, 0x3000000
; SI-NEXT: s_add_i32 s13, s13, 0x3000000
@@ -150743,15 +151298,15 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_add_i32 s19, s19, 0x3000000
; SI-NEXT: s_add_i32 s20, s20, 0x3000000
; SI-NEXT: s_add_i32 s24, s24, 0x3000000
-; SI-NEXT: v_writelane_b32 v42, s7, 53
-; SI-NEXT: s_lshl_b32 s7, s8, 16
+; SI-NEXT: v_writelane_b32 v42, s7, 55
+; SI-NEXT: s_and_b32 s7, s57, 0xffff0000
; SI-NEXT: s_and_b32 s27, s24, 0xffff0000
; SI-NEXT: s_lshl_b32 s26, s24, 16
; SI-NEXT: s_and_b32 s24, s20, 0xffff0000
; SI-NEXT: s_lshl_b32 s20, s20, 16
-; SI-NEXT: s_and_b32 s35, s23, 0xffff0000
+; SI-NEXT: s_and_b32 s72, s23, 0xffff0000
; SI-NEXT: s_lshl_b32 s29, s23, 16
-; SI-NEXT: s_and_b32 s90, s22, 0xffff0000
+; SI-NEXT: s_and_b32 s23, s22, 0xffff0000
; SI-NEXT: s_lshl_b32 s74, s22, 16
; SI-NEXT: s_and_b32 s25, s21, 0xffff0000
; SI-NEXT: s_lshl_b32 s21, s21, 16
@@ -150761,20 +151316,17 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s76, s18, 16
; SI-NEXT: s_and_b32 s77, s16, 0xffff0000
; SI-NEXT: s_lshl_b32 s16, s16, 16
-; SI-NEXT: s_and_b32 s89, s13, 0xffff0000
+; SI-NEXT: s_and_b32 s60, s13, 0xffff0000
; SI-NEXT: s_lshl_b32 s19, s13, 16
; SI-NEXT: s_and_b32 s13, s12, 0xffff0000
-; SI-NEXT: s_lshl_b32 s88, s12, 16
-; SI-NEXT: s_and_b32 s60, s11, 0xffff0000
+; SI-NEXT: s_lshl_b32 s89, s12, 16
+; SI-NEXT: s_and_b32 s73, s11, 0xffff0000
; SI-NEXT: s_lshl_b32 s18, s11, 16
-; SI-NEXT: s_and_b32 s23, s10, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s6, 16
-; SI-NEXT: v_writelane_b32 v42, s7, 54
-; SI-NEXT: s_and_b32 s72, s58, 0xffff0000
; SI-NEXT: s_lshl_b32 s99, s58, 16
-; SI-NEXT: s_and_b32 s7, s57, 0xffff0000
+; SI-NEXT: v_writelane_b32 v42, s7, 56
; SI-NEXT: s_lshl_b32 s57, s57, 16
-; SI-NEXT: s_and_b32 s49, s56, 0xffff0000
+; SI-NEXT: s_and_b32 s7, s56, 0xffff0000
; SI-NEXT: s_lshl_b32 s8, s56, 16
; SI-NEXT: s_and_b32 s51, s47, 0xffff0000
; SI-NEXT: s_lshl_b32 s50, s47, 16
@@ -150800,7 +151352,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_lshl_b32 s11, s5, 16
; SI-NEXT: s_and_b32 s83, s4, 0xffff0000
; SI-NEXT: s_lshl_b32 s84, s4, 16
-; SI-NEXT: v_writelane_b32 v42, s7, 55
+; SI-NEXT: v_writelane_b32 v42, s7, 57
; SI-NEXT: .LBB89_3: ; %end
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s27
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -150815,14 +151367,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s35
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s72
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s29
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s90
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s23
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s74
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -150864,7 +151416,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s89
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s60
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s19
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -150873,24 +151425,24 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s13
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s88
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s89
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 40, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s60
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s73
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s73
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s91
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s59
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v0
-; SI-NEXT: v_readlane_b32 s4, v42, 50
+; SI-NEXT: v_readlane_b32 s4, v42, 49
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
@@ -150898,9 +151450,10 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s17
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0
+; SI-NEXT: v_readlane_b32 s4, v42, 50
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s23
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
; SI-NEXT: v_readlane_b32 s4, v42, 51
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
@@ -150923,14 +151476,15 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0
+; SI-NEXT: v_readlane_b32 s4, v42, 55
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s72
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s99
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0
-; SI-NEXT: v_readlane_b32 s4, v42, 55
+; SI-NEXT: v_readlane_b32 s4, v42, 56
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
@@ -150938,9 +151492,10 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s57
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0
+; SI-NEXT: v_readlane_b32 s4, v42, 57
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s49
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s8
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -151081,25 +151636,31 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; kill: killed $sgpr8
-; SI-NEXT: v_readlane_b32 s58, v43, 19
+; SI-NEXT: ; kill: killed $sgpr6
+; SI-NEXT: ; implicit-def: $sgpr6
+; SI-NEXT: ; implicit-def: $sgpr8
+; SI-NEXT: ; kill: killed $sgpr8
+; SI-NEXT: v_readlane_b32 s90, v43, 19
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: s_mov_b32 s95, s47
-; SI-NEXT: s_mov_b32 s94, s21
+; SI-NEXT: v_readlane_b32 s35, v43, 18
+; SI-NEXT: s_mov_b32 s94, s28
; SI-NEXT: s_mov_b32 s93, s61
-; SI-NEXT: s_mov_b32 s34, s73
-; SI-NEXT: s_mov_b32 s91, s75
+; SI-NEXT: s_mov_b32 s38, s45
+; SI-NEXT: s_mov_b32 s88, s75
; SI-NEXT: v_readlane_b32 s56, v43, 10
; SI-NEXT: s_mov_b32 s36, s63
-; SI-NEXT: s_mov_b32 s38, s59
-; SI-NEXT: s_mov_b32 s37, s42
+; SI-NEXT: s_mov_b32 s39, s59
+; SI-NEXT: s_mov_b32 s37, s78
; SI-NEXT: v_readlane_b32 s30, v43, 17
+; SI-NEXT: s_mov_b32 s58, s46
; SI-NEXT: v_readlane_b32 s98, v43, 6
-; SI-NEXT: s_mov_b32 s46, s45
+; SI-NEXT: s_mov_b32 s49, s21
; SI-NEXT: s_mov_b32 s31, s43
; SI-NEXT: s_mov_b32 s78, s40
; SI-NEXT: v_readlane_b32 s15, v43, 14
-; SI-NEXT: s_mov_b32 s39, s57
-; SI-NEXT: s_mov_b32 s48, s13
+; SI-NEXT: s_mov_b32 s48, s42
+; SI-NEXT: s_mov_b32 s34, s13
; SI-NEXT: v_readlane_b32 s41, v43, 13
; SI-NEXT: v_readlane_b32 s44, v43, 5
; SI-NEXT: v_readlane_b32 s9, v43, 11
@@ -151114,14 +151675,15 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; kill: killed $sgpr8
; SI-NEXT: ; implicit-def: $sgpr8
+; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $sgpr26
; SI-NEXT: ; implicit-def: $sgpr27
; SI-NEXT: ; implicit-def: $sgpr20
; SI-NEXT: ; implicit-def: $sgpr24
; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: ; implicit-def: $sgpr35
+; SI-NEXT: ; implicit-def: $sgpr72
; SI-NEXT: ; implicit-def: $sgpr74
-; SI-NEXT: ; implicit-def: $sgpr90
+; SI-NEXT: ; implicit-def: $sgpr23
; SI-NEXT: ; implicit-def: $sgpr21
; SI-NEXT: ; implicit-def: $sgpr25
; SI-NEXT: ; implicit-def: $sgpr22
@@ -151133,23 +151695,21 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: ; implicit-def: $sgpr79
; SI-NEXT: ; implicit-def: $sgpr63
; SI-NEXT: ; implicit-def: $sgpr19
+; SI-NEXT: ; implicit-def: $sgpr60
; SI-NEXT: ; implicit-def: $sgpr89
-; SI-NEXT: ; implicit-def: $sgpr88
; SI-NEXT: ; implicit-def: $sgpr13
; SI-NEXT: ; implicit-def: $sgpr18
-; SI-NEXT: ; implicit-def: $sgpr60
-; SI-NEXT: ; implicit-def: $sgpr59
; SI-NEXT: ; implicit-def: $sgpr73
+; SI-NEXT: ; implicit-def: $sgpr59
+; SI-NEXT: ; implicit-def: $sgpr91
; SI-NEXT: ; implicit-def: $sgpr17
; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $sgpr23
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr99
-; SI-NEXT: ; implicit-def: $sgpr72
; SI-NEXT: ; implicit-def: $sgpr57
; SI-NEXT: ; kill: killed $sgpr8
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr49
+; SI-NEXT: ; kill: killed $sgpr11
; SI-NEXT: ; implicit-def: $sgpr50
; SI-NEXT: ; implicit-def: $sgpr51
; SI-NEXT: ; implicit-def: $sgpr97
@@ -151174,7 +151734,9 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; SI-NEXT: ; implicit-def: $sgpr92
; SI-NEXT: ; implicit-def: $sgpr84
; SI-NEXT: ; implicit-def: $sgpr83
-; SI-NEXT: s_branch .LBB89_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB89_2
+; SI-NEXT: s_branch .LBB89_3
;
; VI-LABEL: bitcast_v128i8_to_v64bf16_scalar:
; VI: ; %bb.0:
@@ -151195,22 +151757,22 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8
@@ -151235,14 +151797,17 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
+; VI-NEXT: v_mov_b32_e32 v48, v27
+; VI-NEXT: v_mov_b32_e32 v39, v29
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v13
@@ -151250,50 +151815,51 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19
; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23
-; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v28
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200
@@ -151302,9 +151868,9 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v22
-; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v22
+; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v24
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18
@@ -151313,23 +151879,25 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v0
; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
@@ -151342,128 +151910,123 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0
; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328
-; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:36
-; VI-NEXT: s_waitcnt vmcnt(11)
-; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v6
-; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v3
-; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5
+; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:324
; VI-NEXT: s_waitcnt vmcnt(10)
-; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; VI-NEXT: s_waitcnt vmcnt(8)
+; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v0
-; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:52
-; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:116
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:172
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v7
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:308
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:324
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:292
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:188
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:180
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:116
+; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4
+; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(13)
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB89_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -151474,222 +152037,228 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v33, v6
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v3, v8
+; VI-NEXT: v_mov_b32_e32 v2, v8
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v3, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v2, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v36, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v37, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v38, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v39, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v39, v22
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v38, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v39, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v48, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v49, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v49, v26
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v48, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v45, v62
+; VI-NEXT: v_or_b32_sdwa v0, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v2, v52, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v32, v1
-; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v54, v22
-; VI-NEXT: v_mov_b32_e32 v41, v24
+; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v34, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v37, v1
-; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v55, v26
+; VI-NEXT: v_mov_b32_e32 v51, v1
+; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v53, v0
+; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v49, v1
-; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v43, v27
+; VI-NEXT: v_mov_b32_e32 v54, v1
+; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v47, v32
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v55, v0
+; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v35, v1
-; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v53, v28
+; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v47, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v33, v0
-; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v57, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v25, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_mov_b32_e32 v38, v0
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v63, v33
+; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_mov_b32_e32 v48, v0
+; VI-NEXT: v_or_b32_sdwa v0, v27, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v50, v1
+; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v56, v0
-; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v58, v1
-; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v61, v60
-; VI-NEXT: v_mov_b32_e32 v60, v59
+; VI-NEXT: v_mov_b32_e32 v60, v1
+; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v38, v0
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v61, v0
+; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v48, v1
-; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v1
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v62, v43
; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v44, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v44, v42
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v37, v0
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v58, v57
+; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v50, v0
-; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v62, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v52, v0
-; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v0, v29, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v46, v1
-; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v32, v30
+; VI-NEXT: v_mov_b32_e32 v57, v40
+; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_mov_b32_e32 v45, v0
+; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v56, v1
+; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v42, v41
+; VI-NEXT: v_mov_b32_e32 v41, v29
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v63, v0
-; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v29, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v47, v1
-; VI-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v59, v0
+; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v43, v31
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v57, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v33, v0
+; VI-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -151721,12 +152290,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_cbranch_execnz .LBB89_3
; VI-NEXT: .LBB89_2: ; %cmp.true
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v59
-; VI-NEXT: v_or_b32_sdwa v29, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v40
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
+; VI-NEXT: v_or_b32_sdwa v28, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v27, v45, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v47
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
@@ -151745,302 +152316,309 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: s_lshl_b32 s9, s19, 8
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_lshl_b32 s10, s17, 8
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_or_b32_sdwa v31, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: v_or_b32_sdwa v30, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v41
+; VI-NEXT: v_or_b32_sdwa v29, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v63, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v58
+; VI-NEXT: v_or_b32_sdwa v24, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43
+; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v62
+; VI-NEXT: v_or_b32_sdwa v2, v59, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v26, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v62
-; VI-NEXT: v_or_b32_sdwa v28, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44
-; VI-NEXT: v_or_b32_sdwa v53, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v45
-; VI-NEXT: v_or_b32_sdwa v27, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
-; VI-NEXT: v_or_b32_sdwa v52, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40
-; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60
-; VI-NEXT: v_or_b32_sdwa v59, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v61
-; VI-NEXT: v_or_b32_sdwa v24, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v26, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v48, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48
-; VI-NEXT: v_or_b32_sdwa v24, v24, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24
+; VI-NEXT: v_or_b32_sdwa v32, v37, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v32
+; VI-NEXT: v_or_b32_sdwa v26, v26, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v23, v41, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v25, v34, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v38, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38
-; VI-NEXT: v_or_b32_sdwa v23, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23
+; VI-NEXT: v_or_b32_sdwa v33, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v33
+; VI-NEXT: v_or_b32_sdwa v25, v25, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v22, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v60, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v50, v33, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v50
-; VI-NEXT: v_or_b32_sdwa v22, v22, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22
+; VI-NEXT: v_or_b32_sdwa v34, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v23, v50, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v54, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v37, v48, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37
+; VI-NEXT: v_or_b32_sdwa v23, v23, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v20, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v22, v39, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v49, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v49, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49
-; VI-NEXT: v_or_b32_sdwa v20, v20, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v22, v22, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v19, v37, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v52, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v37, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v20, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v31, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v19, v19, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v18, v32, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v54, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v57, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v19, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v57
-; VI-NEXT: v_or_b32_sdwa v18, v18, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51
+; VI-NEXT: v_or_b32_sdwa v19, v19, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v18, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v48, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48
+; VI-NEXT: v_or_b32_sdwa v18, v18, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v56, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v34, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34
-; VI-NEXT: v_or_b32_sdwa v14, v14, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v35, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v35
+; VI-NEXT: v_or_b32_sdwa v15, v15, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36
-; VI-NEXT: v_or_b32_sdwa v13, v13, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v26
-; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v52
-; VI-NEXT: v_or_b32_sdwa v26, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54
-; VI-NEXT: v_or_b32_sdwa v21, v21, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21
-; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v38, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38
+; VI-NEXT: v_or_b32_sdwa v14, v14, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v2
+; VI-NEXT: v_or_b32_sdwa v29, v29, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14
+; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51
-; VI-NEXT: v_or_b32_sdwa v12, v12, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v59
-; VI-NEXT: v_or_b32_sdwa v25, v25, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v39
+; VI-NEXT: v_or_b32_sdwa v13, v13, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v27
+; VI-NEXT: v_add_u32_e32 v27, vcc, 0x300, v24
+; VI-NEXT: v_or_b32_sdwa v24, v60, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v27, v63, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v28, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
+; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24
+; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27
+; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v53, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v53, vcc, 0x300, v53
+; VI-NEXT: v_or_b32_sdwa v12, v12, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12
-; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v33, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v40, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v50, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
-; VI-NEXT: v_or_b32_sdwa v30, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
-; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v2
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v9, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v41
-; VI-NEXT: v_or_b32_sdwa v9, v9, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v10
+; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42
+; VI-NEXT: v_or_b32_sdwa v9, v9, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v10
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x300, v55
-; VI-NEXT: v_or_b32_sdwa v10, v39, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v53
-; VI-NEXT: v_or_b32_sdwa v27, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v28, v29, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v29, v30, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v35, v16, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v10, v50, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v52
+; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54
+; VI-NEXT: v_or_b32_sdwa v20, v20, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v21, v21, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10
-; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27
-; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28
-; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v8, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42
-; VI-NEXT: v_or_b32_sdwa v8, v8, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v11
-; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v40
-; VI-NEXT: v_or_b32_sdwa v11, v33, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v1
-; VI-NEXT: v_or_b32_sdwa v30, v31, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v17, v17, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v43, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v43
+; VI-NEXT: v_or_b32_sdwa v8, v8, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v11
+; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v41
+; VI-NEXT: v_or_b32_sdwa v17, v17, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v11, v36, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v35
+; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v0
+; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v1
+; VI-NEXT: v_or_b32_sdwa v30, v30, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v31, v31, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v30
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31
; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v7, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v44, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v44, vcc, 0x300, v44
; VI-NEXT: v_or_b32_sdwa v7, v7, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
@@ -152048,14 +152626,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v6, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45
; VI-NEXT: v_or_b32_sdwa v6, v6, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6
@@ -152063,14 +152641,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v5, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46
; VI-NEXT: v_or_b32_sdwa v5, v5, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
@@ -152078,17 +152656,17 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 3, v4
; VI-NEXT: v_or_b32_sdwa v4, v47, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v32
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x300, v4
; VI-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v4
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v47, v32, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v47
+; VI-NEXT: v_or_b32_sdwa v47, v56, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_e32 v47, s4, v47
; VI-NEXT: s_and_b32 s4, s26, 0xff
; VI-NEXT: s_or_b32 s4, s5, s4
@@ -152102,34 +152680,25 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: s_or_b32 s8, s9, s8
; VI-NEXT: s_and_b32 s9, s16, 0xff
; VI-NEXT: s_or_b32 s9, s10, s9
-; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v56
; VI-NEXT: s_addk_i32 s5, 0x300
; VI-NEXT: s_addk_i32 s7, 0x300
; VI-NEXT: s_addk_i32 s9, 0x300
-; VI-NEXT: v_or_b32_sdwa v15, v15, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v32, v16, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_lshl_b32 s4, s4, 16
; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_and_b32 s9, s9, 0xffff
; VI-NEXT: s_and_b32 s7, s7, 0xffff
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v32
-; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v0
; VI-NEXT: s_or_b32 s8, s8, s9
; VI-NEXT: s_or_b32 s6, s6, s7
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s8, s8, 0x3000000
; VI-NEXT: s_add_i32 s6, s6, 0x3000000
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
-; VI-NEXT: v_or_b32_sdwa v31, v31, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v47
-; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: v_mov_b32_e32 v2, s4
-; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31
; VI-NEXT: .LBB89_3: ; %end
; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -152150,38 +152719,42 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB89_4:
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v61, v60
-; VI-NEXT: v_mov_b32_e32 v60, v59
-; VI-NEXT: v_mov_b32_e32 v45, v62
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v57, v5
-; VI-NEXT: v_mov_b32_e32 v47, v4
-; VI-NEXT: v_mov_b32_e32 v63, v3
-; VI-NEXT: v_mov_b32_e32 v53, v28
-; VI-NEXT: v_mov_b32_e32 v43, v27
-; VI-NEXT: v_mov_b32_e32 v55, v26
-; VI-NEXT: v_mov_b32_e32 v41, v24
-; VI-NEXT: v_mov_b32_e32 v54, v22
+; VI-NEXT: v_mov_b32_e32 v63, v6
+; VI-NEXT: v_mov_b32_e32 v59, v5
+; VI-NEXT: v_mov_b32_e32 v58, v57
+; VI-NEXT: v_mov_b32_e32 v47, v32
+; VI-NEXT: v_mov_b32_e32 v57, v40
+; VI-NEXT: v_mov_b32_e32 v32, v30
+; VI-NEXT: v_mov_b32_e32 v44, v42
+; VI-NEXT: v_mov_b32_e32 v62, v43
+; VI-NEXT: v_mov_b32_e32 v43, v31
+; VI-NEXT: v_mov_b32_e32 v42, v41
+; VI-NEXT: v_mov_b32_e32 v41, v29
+; VI-NEXT: v_mov_b32_e32 v56, v4
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v49, v26
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v39, v22
+; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB89_2
+; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB89_2
+; VI-NEXT: s_branch .LBB89_3
;
; GFX9-LABEL: bitcast_v128i8_to_v64bf16_scalar:
; GFX9: ; %bb.0:
@@ -152202,16 +152775,18 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332
; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8
@@ -152236,93 +152811,97 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
-; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v1
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
+; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v3
-; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v7
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v9
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v11
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v13
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v15
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23
-; GFX9-NEXT: s_waitcnt vmcnt(24)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v13
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v17
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v45
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v44
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v30
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
@@ -152332,31 +152911,32 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_lshlrev_b32_e32 v43, 8, v21
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(7)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
@@ -152370,148 +152950,145 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:328
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:324
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:148
+; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:140
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:132
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:116
+; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:44
; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
-; GFX9-NEXT: s_waitcnt vmcnt(15)
+; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_waitcnt vmcnt(51)
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v1
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:132
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:140
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:148
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:164
-; GFX9-NEXT: s_waitcnt vmcnt(21)
-; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
-; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(30)
-; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(33)
-; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(36)
-; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(39)
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(42)
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v1
; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(54)
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(55)
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
@@ -152519,17 +153096,13 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT: v_and_b32_e32 v3, s4, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX9-NEXT: v_or_b32_sdwa v2, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v4, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v6, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v4, v4, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v8, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v2, v0, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -152554,266 +153127,279 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v39, v16
-; GFX9-NEXT: v_or_b32_sdwa v17, v34, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_or_b32_sdwa v2, v33, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v33, v32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v55, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v54, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v42, v61
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v55, v1
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v36, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v50, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v57, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v49, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v17, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v63, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v21, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v17, v17, 16, v1
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_mov_b32_e32 v33, v45
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v18, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v19, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v35, v61
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v20, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v21, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v34, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v23, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_mov_b32_e32 v46, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v42, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v24, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v35, v45
-; GFX9-NEXT: v_mov_b32_e32 v45, v61
-; GFX9-NEXT: v_mov_b32_e32 v61, v42
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v38, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v30, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v25, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v54, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v54, v2
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v41, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v46, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v29, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v27, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v40, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v27, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v60, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v1, v57, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v56, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v59, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v45, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX9-NEXT: v_mov_b32_e32 v45, v34
+; GFX9-NEXT: v_mov_b32_e32 v34, v38
+; GFX9-NEXT: v_mov_b32_e32 v56, v39
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v41, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v29, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v41, v43
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v60, v49
+; GFX9-NEXT: v_mov_b32_e32 v47, v61
+; GFX9-NEXT: v_mov_b32_e32 v49, v48
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB89_3
; GFX9-NEXT: .LBB89_2:
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v33, v45
-; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v33, v32
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v35, v61
; GFX9-NEXT: .LBB89_3: ; %Flow
-; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GFX9-NEXT: s_cbranch_vccnz .LBB89_5
; GFX9-NEXT: ; %bb.4: ; %cmp.true
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
@@ -152831,59 +153417,66 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: s_lshl_b32 s9, s17, 8
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_lshl_b32 s10, s19, 8
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(15)
+; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(12)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
+; GFX9-NEXT: s_waitcnt vmcnt(8)
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
-; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
+; GFX9-NEXT: v_or_b32_sdwa v20, v49, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
+; GFX9-NEXT: v_or_b32_sdwa v24, v41, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v25, v37, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v37, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v23, v42, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
+; GFX9-NEXT: v_or_b32_sdwa v21, v47, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v22, v46, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v25, v32, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21
+; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v38, v38, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
; GFX9-NEXT: v_and_b32_e32 v3, s4, v3
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_and_b32 s4, s24, 0xff
@@ -152897,8 +153490,6 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: s_and_b32 s8, s16, 0xff
; GFX9-NEXT: s_or_b32 s8, s9, s8
; GFX9-NEXT: s_and_b32 s9, s18, 0xff
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_or_b32 s9, s10, s9
; GFX9-NEXT: s_addk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s5, 0x300
@@ -152915,14 +153506,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -152930,14 +153521,14 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -152945,264 +153536,240 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v37, v44, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v38, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v39, v50, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v48, v60, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v49, v45, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v51, v62, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v39, v36, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v48, v46, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v49, v35, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54
+; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
-; GFX9-NEXT: v_or_b32_sdwa v2, v16, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v51, v34, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
-; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16
-; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v61
-; GFX9-NEXT: v_or_b32_sdwa v24, v54, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24
; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48
; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51
+; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41
; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v45
-; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
-; GFX9-NEXT: v_or_b32_sdwa v20, v57, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v56
-; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
-; GFX9-NEXT: v_or_b32_sdwa v21, v32, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21
-; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v47
-; GFX9-NEXT: v_or_b32_sdwa v23, v41, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v43
-; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
-; GFX9-NEXT: v_or_b32_sdwa v22, v44, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v35
; GFX9-NEXT: v_add_u32_e32 v35, 0x300, v22
; GFX9-NEXT: v_add_u32_e32 v22, 0x300, v52
-; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v28, v35, 16, v28
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v43, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v52, 0x300, v43
-; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v44, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v33
-; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1
-; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
+; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v45, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v27, 0x300, v23
; GFX9-NEXT: v_add_u32_e32 v26, 0x300, v25
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
-; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2
; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v38
; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v50
; GFX9-NEXT: v_add_u32_e32 v38, 0x300, v39
; GFX9-NEXT: v_add_u32_e32 v39, 0x300, v49
; GFX9-NEXT: v_add_u32_e32 v49, 0x300, v53
; GFX9-NEXT: v_add_u32_e32 v50, 0x300, v55
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
+; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20
+; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40
; GFX9-NEXT: v_add_u32_e32 v53, 0x300, v45
+; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX9-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX9-NEXT: v_and_b32_e32 v26, 0xffff, v26
; GFX9-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23
@@ -153210,10 +153777,32 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: v_lshl_or_b32 v25, v38, 16, v25
; GFX9-NEXT: v_lshl_or_b32 v26, v37, 16, v26
; GFX9-NEXT: v_lshl_or_b32 v27, v36, 16, v27
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0
+; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
+; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2
+; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v18, 3, v18
; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18
; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
@@ -153221,7 +153810,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
-; GFX9-NEXT: v_or_b32_sdwa v19, v60, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v19, v59, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19
; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
@@ -153389,7 +153978,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v83, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v86, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v97, 8, v2
@@ -153458,38 +154047,38 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB89_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
@@ -153576,12 +154165,12 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s6
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s7
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
@@ -153684,9 +154273,8 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB89_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB89_3
; GFX11-TRUE16-NEXT: .LBB89_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
@@ -154072,7 +154660,9 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-TRUE16-NEXT: .LBB89_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB89_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB89_2
+; GFX11-TRUE16-NEXT: s_branch .LBB89_3
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64bf16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -154215,7 +154805,7 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v83, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v86, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v85, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v97, 8, v2
@@ -154284,38 +154874,38 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB89_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v5, 0xffff, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v35
@@ -154402,12 +154992,12 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s6
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v151
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v149
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s7
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v180
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v177
@@ -154510,9 +155100,8 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB89_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB89_3
; GFX11-FAKE16-NEXT: .LBB89_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
@@ -154898,7 +155487,9 @@ define inreg <64 x bfloat> @bitcast_v128i8_to_v64bf16_scalar(<128 x i8> inreg %a
; GFX11-FAKE16-NEXT: .LBB89_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB89_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB89_2
+; GFX11-FAKE16-NEXT: s_branch .LBB89_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -162562,462 +163153,482 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:72
; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_mul_f32_e32 v59, 1.0, v1
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v6
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v14
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v18
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v22
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v23
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v26
+; SI-NEXT: v_mul_f32_e32 v61, 1.0, v5
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e64 v5, 1.0, s21
; SI-NEXT: v_mul_f32_e32 v46, 1.0, v2
; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4
; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_mul_f32_e32 v61, 1.0, v5
-; SI-NEXT: v_mul_f32_e32 v44, 1.0, v8
+; SI-NEXT: v_mul_f32_e32 v60, 1.0, v6
+; SI-NEXT: v_mul_f32_e32 v6, 1.0, v8
; SI-NEXT: v_mul_f32_e32 v7, 1.0, v7
; SI-NEXT: v_mul_f32_e32 v56, 1.0, v10
; SI-NEXT: v_mul_f32_e32 v63, 1.0, v9
-; SI-NEXT: v_mul_f32_e32 v47, 1.0, v12
-; SI-NEXT: v_mul_f32_e32 v57, 1.0, v11
-; SI-NEXT: v_mul_f32_e32 v45, 1.0, v13
+; SI-NEXT: v_mul_f32_e32 v45, 1.0, v12
+; SI-NEXT: v_mul_f32_e32 v47, 1.0, v11
+; SI-NEXT: v_mul_f32_e32 v44, 1.0, v13
+; SI-NEXT: v_mul_f32_e32 v57, 1.0, v16
; SI-NEXT: v_mul_f32_e32 v58, 1.0, v15
; SI-NEXT: v_mul_f32_e32 v18, 1.0, v17
-; SI-NEXT: v_mul_f32_e32 v62, 1.0, v20
-; SI-NEXT: v_mul_f32_e32 v60, 1.0, v19
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e32 v20, 1.0, v20
+; SI-NEXT: v_mul_f32_e32 v62, 1.0, v19
; SI-NEXT: v_mul_f32_e32 v21, 1.0, v21
-; SI-NEXT: v_mul_f32_e32 v19, 1.0, v24
-; SI-NEXT: v_mul_f32_e32 v22, 1.0, v23
-; SI-NEXT: v_mul_f32_e32 v23, 1.0, v26
+; SI-NEXT: v_mul_f32_e32 v22, 1.0, v24
; SI-NEXT: v_mul_f32_e32 v24, 1.0, v25
-; SI-NEXT: v_mul_f32_e32 v25, 1.0, v28
+; SI-NEXT: v_mul_f32_e32 v26, 1.0, v28
; SI-NEXT: v_mul_f32_e32 v27, 1.0, v27
-; SI-NEXT: v_mul_f32_e32 v20, 1.0, v30
-; SI-NEXT: v_mul_f32_e32 v26, 1.0, v29
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17
+; SI-NEXT: v_mul_f32_e32 v14, 1.0, v30
+; SI-NEXT: v_mul_f32_e32 v29, 1.0, v29
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v8, 1.0, s18
-; SI-NEXT: v_mul_f32_e64 v5, 1.0, s21
-; SI-NEXT: v_mul_f32_e64 v6, 1.0, s20
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e64 v5, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v10, 1.0, s25
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s24
; SI-NEXT: v_mul_f32_e64 v12, 1.0, s26
+; SI-NEXT: v_mul_f32_e64 v16, 1.0, s29
; SI-NEXT: v_mul_f32_e64 v17, 1.0, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_mul_f32_e32 v31, 1.0, v16
-; SI-NEXT: v_mul_f32_e32 v29, 1.0, v32
-; SI-NEXT: v_mul_f32_e32 v30, 1.0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mul_f32_e32 v30, 1.0, v32
+; SI-NEXT: v_mul_f32_e32 v33, 1.0, v33
; SI-NEXT: v_mul_f32_e32 v32, 1.0, v34
-; SI-NEXT: v_mul_f32_e32 v14, 1.0, v35
-; SI-NEXT: v_mul_f32_e32 v35, 1.0, v36
-; SI-NEXT: v_mul_f32_e32 v37, 1.0, v37
-; SI-NEXT: v_mul_f32_e32 v34, 1.0, v38
+; SI-NEXT: v_mul_f32_e32 v19, 1.0, v35
+; SI-NEXT: v_mul_f32_e32 v34, 1.0, v36
+; SI-NEXT: v_mul_f32_e32 v35, 1.0, v37
+; SI-NEXT: v_mul_f32_e32 v25, 1.0, v38
; SI-NEXT: v_mul_f32_e32 v15, 1.0, v39
; SI-NEXT: v_mul_f32_e32 v38, 1.0, v48
-; SI-NEXT: v_mul_f32_e32 v48, 1.0, v49
-; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: v_mul_f32_e32 v39, 1.0, v49
; SI-NEXT: v_mul_f32_e32 v28, 1.0, v50
-; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_mul_f32_e32 v51, 1.0, v51
+; SI-NEXT: v_mul_f32_e32 v23, 1.0, v51
+; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: v_mul_f32_e32 v48, 1.0, v52
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_mul_f32_e32 v50, 1.0, v52
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v54
+; SI-NEXT: v_mul_f32_e32 v50, 1.0, v53
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_mul_f32_e32 v37, 1.0, v55
; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_mul_f32_e32 v52, 1.0, v53
+; SI-NEXT: v_mul_f32_e32 v51, 1.0, v40
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_mul_f32_e32 v33, 1.0, v54
+; SI-NEXT: v_mul_f32_e32 v55, 1.0, v41
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_mul_f32_e32 v36, 1.0, v55
+; SI-NEXT: v_mul_f32_e32 v52, 1.0, v42
; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_mul_f32_e32 v55, 1.0, v40
-; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_mul_f32_e32 v41, 1.0, v41
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_mul_f32_e32 v42, 1.0, v42
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_mul_f32_e32 v54, 1.0, v43
-; SI-NEXT: v_mul_f32_e64 v39, 1.0, s23
+; SI-NEXT: v_mul_f32_e32 v41, 1.0, v43
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17
+; SI-NEXT: v_mul_f32_e64 v31, 1.0, s23
; SI-NEXT: v_mul_f32_e64 v49, 1.0, s27
-; SI-NEXT: v_mul_f32_e64 v16, 1.0, s29
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mov_b32_e32 v43, v36
; SI-NEXT: v_alignbit_b32 v36, v1, v2, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5
-; SI-NEXT: v_alignbit_b32 v6, v1, v6, 16
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v42, v23
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_alignbit_b32 v5, v1, v5, 16
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10
; SI-NEXT: v_alignbit_b32 v2, v1, v13, 16
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v16
-; SI-NEXT: v_alignbit_b32 v5, v1, v17, 16
+; SI-NEXT: v_alignbit_b32 v10, v1, v17, 16
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4
; SI-NEXT: v_alignbit_b32 v4, v1, v3, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6
; SI-NEXT: v_alignbit_b32 v3, v1, v7, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v47
-; SI-NEXT: v_alignbit_b32 v16, v1, v57, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v31
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v45
+; SI-NEXT: v_alignbit_b32 v16, v1, v47, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v57
; SI-NEXT: v_alignbit_b32 v13, v1, v58, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v62
-; SI-NEXT: v_alignbit_b32 v10, v1, v60, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19
-; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v11
-; SI-NEXT: v_alignbit_b32 v44, v19, v8, 16
-; SI-NEXT: v_alignbit_b32 v7, v1, v22, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v25
-; SI-NEXT: v_alignbit_b32 v8, v44, v36, 24
-; SI-NEXT: v_alignbit_b32 v60, v1, v27, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v29
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v20
+; SI-NEXT: v_alignbit_b32 v57, v1, v62, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v22
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_alignbit_b32 v7, v1, v43, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v26
+; SI-NEXT: v_alignbit_b32 v6, v1, v27, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30
+; SI-NEXT: v_alignbit_b32 v54, v1, v33, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v34
+; SI-NEXT: v_alignbit_b32 v53, v1, v35, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v38
+; SI-NEXT: v_alignbit_b32 v17, v1, v39, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v48
+; SI-NEXT: v_alignbit_b32 v40, v1, v50, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v51
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_alignbit_b32 v51, v1, v55, 16
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v11
+; SI-NEXT: v_alignbit_b32 v45, v1, v8, 16
+; SI-NEXT: v_alignbit_b32 v8, v45, v36, 24
+; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v31
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v44, v36, 16
-; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v39
-; SI-NEXT: v_alignbit_b32 v57, v1, v30, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v35
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v45, v36, 16
+; SI-NEXT: v_alignbit_b32 v58, v48, v9, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v44, v36, 8
-; SI-NEXT: v_alignbit_b32 v58, v22, v9, 16
-; SI-NEXT: v_alignbit_b32 v40, v1, v37, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v58, v5, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v58, v6, 24
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v58, v5, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v58, v6, 16
-; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v49
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v58, v5, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v58, v6, 8
-; SI-NEXT: v_alignbit_b32 v47, v25, v12, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v49
+; SI-NEXT: v_alignbit_b32 v47, v8, v12, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v47, v2, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v47, v2, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v47, v2, 8
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v38
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v46
-; SI-NEXT: v_alignbit_b32 v53, v1, v48, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v50
; SI-NEXT: v_alignbit_b32 v50, v8, v59, 16
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v50, v5, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v50, v10, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v50, v5, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v50, v10, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v50, v5, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; SI-NEXT: v_alignbit_b32 v52, v1, v52, 16
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v55
-; SI-NEXT: v_mov_b32_e32 v17, v63
-; SI-NEXT: v_alignbit_b32 v1, v1, v41, 16
-; SI-NEXT: s_mov_b64 s[4:5], 0
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v31
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v60
; SI-NEXT: v_alignbit_b32 v62, v8, v61, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v62, v4, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v62, v4, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v8, v62, v4, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v56
; SI-NEXT: v_alignbit_b32 v55, v8, v63, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v55, v3, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v55, v3, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v55, v3, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; SI-NEXT: v_alignbit_b32 v48, v62, v4, 8
-; SI-NEXT: v_lshrrev_b32_e32 v31, 24, v31
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v37
-; SI-NEXT: v_alignbit_b32 v38, v8, v45, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v11
+; SI-NEXT: v_alignbit_b32 v22, v50, v10, 16
+; SI-NEXT: v_alignbit_b32 v33, v45, v36, 8
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(3)
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v59
+; SI-NEXT: v_alignbit_b32 v38, v8, v44, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v8, v38, v16, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v8, v38, v16, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v38, v16, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v9
; SI-NEXT: v_alignbit_b32 v35, v8, v18, 16
-; SI-NEXT: v_mov_b32_e32 v45, v8
-; SI-NEXT: v_alignbit_b32 v8, v35, v13, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v35, v13, 16
+; SI-NEXT: v_alignbit_b32 v8, v35, v13, 24
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v61
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: v_alignbit_b32 v29, v35, v13, 8
-; SI-NEXT: v_alignbit_b32 v61, v38, v16, 24
-; SI-NEXT: v_alignbit_b32 v41, v38, v16, 16
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v59
-; SI-NEXT: v_alignbit_b32 v30, v8, v21, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v30, v10, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v35, v13, 16
+; SI-NEXT: v_alignbit_b32 v30, v20, v21, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v30, v10, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v30, v57, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v30, v10, 8
+; SI-NEXT: v_alignbit_b32 v8, v30, v57, 16
+; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v39
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v23
-; SI-NEXT: v_alignbit_b32 v27, v8, v24, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v30, v57, 8
+; SI-NEXT: v_alignbit_b32 v27, v26, v24, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v27, v7, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v27, v7, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v14
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v27, v7, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v24, v44, v29, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v20
-; SI-NEXT: v_alignbit_b32 v24, v8, v26, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v24, v6, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v24, v60, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v24, v6, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v24, v60, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v24, v6, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v32
-; SI-NEXT: v_alignbit_b32 v21, v8, v14, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v21, v8, v19, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v21, v57, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v21, v54, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v21, v57, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v21, v54, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v21, v57, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v21, v54, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v34
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v25
; SI-NEXT: v_alignbit_b32 v18, v8, v15, 16
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v18, v40, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v18, v53, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v18, v40, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_alignbit_b32 v8, v18, v40, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v18, v53, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v8, v18, v53, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v28
-; SI-NEXT: v_alignbit_b32 v63, v8, v51, 16
+; SI-NEXT: v_alignbit_b32 v63, v8, v23, 16
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v63, v53, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v63, v53, 16
-; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v33
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v63, v17, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v63, v53, 8
-; SI-NEXT: v_alignbit_b32 v12, v40, v43, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v63, v17, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v12, v52, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v63, v17, 8
+; SI-NEXT: v_mov_b32_e32 v17, v1
+; SI-NEXT: v_mov_b32_e32 v1, v51
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v15, v9
+; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v49
+; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v14
+; SI-NEXT: v_lshrrev_b32_e32 v29, 24, v39
+; SI-NEXT: v_alignbit_b32 v34, v35, v13, 8
+; SI-NEXT: v_mov_b32_e32 v54, v41
+; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v45
+; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v47
+; SI-NEXT: v_mov_b32_e32 v39, v28
+; SI-NEXT: v_mov_b32_e32 v53, v52
+; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v51
+; SI-NEXT: v_alignbit_b32 v12, v8, v37, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v12, v52, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v12, v40, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v12, v52, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v12, v40, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_alignbit_b32 v8, v12, v40, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v42
-; SI-NEXT: v_mov_b32_e32 v15, v9
-; SI-NEXT: v_alignbit_b32 v9, v8, v54, 16
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v52
+; SI-NEXT: v_alignbit_b32 v9, v8, v41, 16
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v9, v1, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v9, v1, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v60
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v9, v1, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v8, v37
-; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v49
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v23, 24, v56
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v37, 24, v46
-; SI-NEXT: v_lshrrev_b32_e32 v46, 24, v56
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v24
; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v32
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v8
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v34
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v59
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v21
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v25
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v38
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v18
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v28
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v15
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v33
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v59
-; SI-NEXT: v_lshrrev_b32_e32 v20, 24, v20
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v12
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v30
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v20, 8, v24
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v42
-; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v11
-; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v39
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v51
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v12
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v23
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v11, 24, v31
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v20, v29
-; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v21
-; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v18
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v61
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v56, 24, v52
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v9
-; SI-NEXT: v_alignbit_b32 v26, v24, v60, 16
-; SI-NEXT: v_lshrrev_b32_e32 v51, 8, v44
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v9
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v58
-; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v47
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v50
-; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v62
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v55
+; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v46
+; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v50
+; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v62
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v55
; SI-NEXT: v_lshrrev_b32_e32 v15, 8, v35
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v27
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v37, v34
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v29, v28
-; SI-NEXT: v_mov_b32_e32 v23, v48
+; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v30
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v27
+; SI-NEXT: v_mov_b32_e32 v23, v26
+; SI-NEXT: v_mov_b32_e32 v26, v25
+; SI-NEXT: v_mov_b32_e32 v25, v22
; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v63
-; SI-NEXT: v_mov_b32_e32 v48, v33
-; SI-NEXT: v_mov_b32_e32 v34, v53
-; SI-NEXT: v_mov_b32_e32 v53, v42
-; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v22, v43
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: s_branch .LBB91_3
; SI-NEXT: .LBB91_2:
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_mov_b32_e32 v39, v28
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
@@ -163160,353 +163771,347 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr9
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr40
-; SI-NEXT: v_mov_b32_e32 v53, v42
-; SI-NEXT: s_waitcnt expcnt(5)
-; SI-NEXT: v_mov_b32_e32 v48, v33
-; SI-NEXT: v_mov_b32_e32 v29, v28
-; SI-NEXT: v_mov_b32_e32 v37, v34
-; SI-NEXT: v_mov_b32_e32 v17, v63
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: v_mov_b32_e32 v54, v41
+; SI-NEXT: v_mov_b32_e32 v53, v52
+; SI-NEXT: v_mov_b32_e32 v42, v23
+; SI-NEXT: v_mov_b32_e32 v26, v25
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr9
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; kill: killed $vgpr40
+; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr40
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; implicit-def: $vgpr44
-; SI-NEXT: ; implicit-def: $vgpr51
+; SI-NEXT: ; implicit-def: $vgpr33
+; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr14
-; SI-NEXT: ; implicit-def: $vgpr6
+; SI-NEXT: ; implicit-def: $vgpr17
+; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr22
+; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr47
; SI-NEXT: ; implicit-def: $vgpr49
+; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr50
-; SI-NEXT: ; implicit-def: $vgpr39
+; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr62
-; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr46
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr55
-; SI-NEXT: ; implicit-def: $vgpr46
+; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr16
-; SI-NEXT: ; implicit-def: $vgpr41
-; SI-NEXT: ; implicit-def: $vgpr61
; SI-NEXT: ; implicit-def: $vgpr38
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr20
+; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr35
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: ; implicit-def: $vgpr45
-; SI-NEXT: ; implicit-def: $vgpr10
+; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr30
+; SI-NEXT: ; implicit-def: $vgpr8
+; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr60
-; SI-NEXT: ; implicit-def: $vgpr26
+; SI-NEXT: ; implicit-def: $vgpr29
+; SI-NEXT: ; implicit-def: $vgpr23
+; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr24
-; SI-NEXT: ; implicit-def: $vgpr57
+; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr18
-; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr63
; SI-NEXT: ; implicit-def: $vgpr32
-; SI-NEXT: ; implicit-def: $vgpr52
; SI-NEXT: ; implicit-def: $vgpr12
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr9
; SI-NEXT: ; implicit-def: $vgpr9
+; SI-NEXT: ; kill: killed $vgpr14
; SI-NEXT: ; kill: killed $vgpr40
; SI-NEXT: ; implicit-def: $vgpr40
+; SI-NEXT: ; implicit-def: $vgpr14
+; SI-NEXT: ; kill: killed $vgpr14
; SI-NEXT: ; implicit-def: $vgpr56
; SI-NEXT: ; kill: killed $vgpr56
; SI-NEXT: .LBB91_3: ; %Flow
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v56, v17
-; SI-NEXT: v_mov_b32_e32 v54, v61
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v42, v32
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: v_mov_b32_e32 v41, v32
; SI-NEXT: s_cbranch_vccnz .LBB91_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v14
+; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v8
; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
-; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v13
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v15
+; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v16
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_and_b32_e32 v44, 0xffff0000, v44
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v45, 0xffff0000, v45
+; SI-NEXT: v_add_f32_e32 v60, 0x40c00000, v45
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v60
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
-; SI-NEXT: v_alignbit_b32 v52, v3, v2, 16
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v11, v3, v2, 16
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_alignbit_b32 v34, v4, v3, 16
-; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v33
+; SI-NEXT: v_alignbit_b32 v15, v4, v3, 16
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v4, 0xffff0000, v2
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v4
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v5
-; SI-NEXT: v_alignbit_b32 v51, v5, v4, 16
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v52, v5, v4, 16
+; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v54
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v5
; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v53
-; SI-NEXT: v_add_f32_e32 v26, 0x40c00000, v5
+; SI-NEXT: v_add_f32_e32 v54, 0x40c00000, v5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xffff0000, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v5, 0x40c00000, v5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v2
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT: v_alignbit_b32 v57, v7, v5, 16
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v26
+; SI-NEXT: v_alignbit_b32 v53, v7, v5, 16
+; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v54
; SI-NEXT: v_alignbit_b32 v9, v7, v6, 16
-; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v32
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v2
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v6
-; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v48
-; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v6
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v11
-; SI-NEXT: v_alignbit_b32 v12, v8, v7, 16
-; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v28
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v28
+; SI-NEXT: v_add_f32_e32 v37, 0x40c00000, v6
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v37
+; SI-NEXT: v_alignbit_b32 v12, v2, v7, 16
+; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v42
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v5
+; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
+; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v36
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
-; SI-NEXT: v_alignbit_b32 v60, v10, v6, 16
+; SI-NEXT: v_alignbit_b32 v6, v10, v6, 16
; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v7
-; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v29
-; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v7
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v7
+; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v39
+; SI-NEXT: v_add_f32_e32 v51, 0x40c00000, v7
+; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v22
; SI-NEXT: v_add_f32_e32 v7, 0x40c00000, v7
; SI-NEXT: v_alignbit_b32 v7, v13, v7, 16
-; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v31
+; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v51
; SI-NEXT: v_alignbit_b32 v63, v13, v10, 16
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v10
-; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v37
+; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v26
; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v10
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
-; SI-NEXT: v_alignbit_b32 v10, v14, v10, 16
+; SI-NEXT: v_alignbit_b32 v57, v14, v10, 16
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v32
; SI-NEXT: v_alignbit_b32 v18, v14, v13, 16
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v13
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v2
+; SI-NEXT: v_add_f32_e32 v49, 0x40c00000, v13
; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; SI-NEXT: v_add_f32_e32 v49, 0x40c00000, v13
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v49
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: v_alignbit_b32 v21, v15, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
; SI-NEXT: v_alignbit_b32 v13, v16, v13, 16
+; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v49
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v21, v16, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
+; SI-NEXT: v_add_f32_e32 v34, 0x40c00000, v16
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v34
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v15
-; SI-NEXT: v_add_f32_e32 v23, 0x40c00000, v16
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v23
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
; SI-NEXT: v_alignbit_b32 v16, v19, v16, 16
; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v19
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v19
+; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v31
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_alignbit_b32 v24, v15, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v20
+; SI-NEXT: v_alignbit_b32 v24, v44, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v24
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v22
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_alignbit_b32 v27, v15, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v3, v22, v19, 16
-; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v59
-; SI-NEXT: v_add_f32_e32 v54, 0x40c00000, v22
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v54
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; SI-NEXT: v_add_f32_e32 v59, 0x40c00000, v44
-; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v59
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v61
+; SI-NEXT: v_alignbit_b32 v27, v23, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v22
+; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v29
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_alignbit_b32 v30, v15, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_alignbit_b32 v30, v20, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v25
; SI-NEXT: v_alignbit_b32 v4, v25, v22, 16
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v17
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v40, 0x40c00000, v25
-; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v15
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v40
-; SI-NEXT: v_alignbit_b32 v35, v45, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v17
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v15
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v17
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v40
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v35, v17, v14, 16
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v28
-; SI-NEXT: v_alignbit_b32 v5, v28, v25, 16
+; SI-NEXT: v_alignbit_b32 v10, v28, v25, 16
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v28
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v41
-; SI-NEXT: v_alignbit_b32 v38, v15, v14, 16
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v56
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v28, 0x40c00000, v28
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v17
; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v33
; SI-NEXT: v_alignbit_b32 v2, v33, v28, 16
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v41
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v38, v17, v14, 16
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v56
+; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
; SI-NEXT: v_add_f32_e32 v43, 0x40c00000, v33
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v43
-; SI-NEXT: v_alignbit_b32 v55, v61, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v36
-; SI-NEXT: v_alignbit_b32 v6, v36, v33, 16
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v43
+; SI-NEXT: v_alignbit_b32 v5, v36, v33, 16
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v55, v17, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
@@ -163514,20 +164119,18 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v36
; SI-NEXT: v_add_f32_e32 v46, 0x40c00000, v36
; SI-NEXT: v_and_b32_e32 v36, 0xffff0000, v17
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v46
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v46
+; SI-NEXT: v_alignbit_b32 v62, v59, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v36, 0x40c00000, v36
-; SI-NEXT: v_alignbit_b32 v62, v15, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v17
; SI-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v39
; SI-NEXT: v_alignbit_b32 v36, v39, v36, 16
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -163535,284 +164138,287 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_f32_e32 v42, 0x40c00000, v39
; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v42
; SI-NEXT: v_alignbit_b32 v50, v17, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v25, v50, v10, 16
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v39
; SI-NEXT: v_add_f32_e32 v56, 0x40c00000, v39
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v56
-; SI-NEXT: v_alignbit_b32 v47, v25, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v56
+; SI-NEXT: v_alignbit_b32 v47, v17, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v60
+; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v55
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v39
; SI-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v39
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v39
-; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v50
-; SI-NEXT: v_alignbit_b32 v58, v22, v14, 16
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v56
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v39
+; SI-NEXT: v_alignbit_b32 v58, v48, v14, 16
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v39
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v42
-; SI-NEXT: v_lshrrev_b32_e32 v42, 8, v63
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v56
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v46
-; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v55
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v42
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; SI-NEXT: v_alignbit_b32 v45, v17, v14, 16
+; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v46
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v43
-; SI-NEXT: v_alignbit_b32 v43, v38, v16, 8
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v43
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v41
-; SI-NEXT: v_alignbit_b32 v41, v38, v16, 16
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v41
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 24, v40
+; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v40
; SI-NEXT: v_mov_b32_e32 v40, v8
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v54
-; SI-NEXT: v_alignbit_b32 v54, v38, v16, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v20
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: v_alignbit_b32 v20, v35, v13, 8
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v29
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v15, 8, v35
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v31
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v23
-; SI-NEXT: v_alignbit_b32 v23, v62, v4, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v34
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v49
-; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v47
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v32
-; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v18
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v31
-; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v62
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v11
-; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v58
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v51
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v26
-; SI-NEXT: v_alignbit_b32 v26, v24, v60, 16
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v37
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_alignbit_b32 v44, v19, v14, 16
-; SI-NEXT: v_lshrrev_b32_e32 v14, 24, v59
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v44, v36, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 24, v54
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v44, v36, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v45, v36, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v44, v36, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v45, v36, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v58, v6, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v58, v5, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v58, v6, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v58, v5, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v58, v6, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v58, v5, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v47, v2, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v47, v2, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v47, v2, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v50, v5, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v50, v5, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v50, v10, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v50, v5, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v50, v10, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v62, v4, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v62, v4, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v8, v62, v4, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v55, v3, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v55, v3, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v55, v3, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v35, v13, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v35, v13, 16
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v30, v10, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v35, v13, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v30, v10, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v30, v57, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v30, v10, 8
+; SI-NEXT: v_alignbit_b32 v8, v30, v57, 16
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v8, v30, v57, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v27, v7, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v27, v7, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v27, v7, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v24, v60, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v24, v6, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v24, v60, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v24, v6, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v21, v57, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v24, v6, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v21, v57, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v21, v53, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v21, v57, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v21, v53, 16
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_alignbit_b32 v8, v21, v53, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v18, v51, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v18, v52, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v18, v51, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v18, v52, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_alignbit_b32 v8, v18, v51, 8
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v18, v52, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v12
-; SI-NEXT: v_lshrrev_b32_e32 v51, 8, v44
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v63, v15, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v21
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v63, v15, 16
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_alignbit_b32 v8, v63, v15, 8
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v63, v34, 24
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v12, v11, 24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v32, 8, v9
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v8, v12, v11, 16
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v63, v34, 16
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_alignbit_b32 v8, v12, v11, 8
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v63, v34, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v12, v52, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v12, v52, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v8, v12, v52, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v9, v1, 24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v24
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v9, v1, 16
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v21
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v8, v9, v1, 8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v18
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v38
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v12
+; SI-NEXT: v_alignbit_b32 v33, v45, v36, 8
+; SI-NEXT: v_alignbit_b32 v43, v38, v16, 24
+; SI-NEXT: v_alignbit_b32 v51, v38, v16, 16
+; SI-NEXT: v_alignbit_b32 v37, v38, v16, 8
+; SI-NEXT: v_alignbit_b32 v34, v35, v13, 8
+; SI-NEXT: v_lshrrev_b32_e32 v19, 8, v45
+; SI-NEXT: v_lshrrev_b32_e32 v11, 8, v58
+; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v47
+; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v50
+; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v62
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v15, 8, v35
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v30
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v29, 8, v27
+; SI-NEXT: v_lshrrev_b32_e32 v41, 8, v63
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v8, 8, v27
+; SI-NEXT: v_lshrrev_b32_e32 v14, 8, v9
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; SI-NEXT: .LBB91_5: ; %end
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v36, 0xff, v36
-; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14
+; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v33
+; SI-NEXT: v_or_b32_e32 v32, v36, v32
+; SI-NEXT: v_and_b32_e32 v32, 0xffff, v32
; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v10
; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
-; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v8
+; SI-NEXT: v_and_b32_e32 v6, 0xff, v6
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v28
-; SI-NEXT: v_or_b32_e32 v32, v36, v32
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v36, 0xff, v29
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v36, 0xff, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36
-; SI-NEXT: v_and_b32_e32 v32, 0xffff, v32
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v29
+; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v14
; SI-NEXT: v_or_b32_e32 v36, v56, v36
; SI-NEXT: v_or_b32_e32 v32, v32, v36
; SI-NEXT: buffer_store_dword v32, v0, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v32, 0xff, v44
-; SI-NEXT: v_lshlrev_b32_e32 v36, 8, v51
+; SI-NEXT: v_and_b32_e32 v32, 0xff, v45
+; SI-NEXT: v_lshlrev_b32_e32 v36, 8, v19
; SI-NEXT: v_or_b32_e32 v32, v32, v36
-; SI-NEXT: v_and_b32_e32 v36, 0xff, v19
+; SI-NEXT: v_and_b32_e32 v36, 0xff, v17
; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36
-; SI-NEXT: v_or_b32_e32 v14, v14, v36
; SI-NEXT: v_and_b32_e32 v32, 0xffff, v32
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14
+; SI-NEXT: v_or_b32_e32 v14, v14, v36
; SI-NEXT: v_or_b32_e32 v14, v32, v14
; SI-NEXT: v_add_i32_e32 v32, vcc, 4, v0
; SI-NEXT: buffer_store_dword v14, v32, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v6
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v32, 8, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v14, v14, v32
; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v32, 0xff, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v33, 24, v19
@@ -163820,31 +164426,30 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v14, v14, v32
; SI-NEXT: v_add_i32_e32 v32, vcc, 8, v0
; SI-NEXT: buffer_store_dword v14, v32, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xff, v58
; SI-NEXT: v_or_b32_e32 v11, v14, v11
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v22
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v48
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v6
+; SI-NEXT: v_lshlrev_b32_e32 v32, 24, v5
; SI-NEXT: v_or_b32_e32 v14, v32, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 12, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v2
-; SI-NEXT: v_and_b32_e32 v6, 0xff, v60
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v19
+; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v2
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
@@ -163852,94 +164457,93 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 16, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v47
; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v49
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v25
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v14
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v2
; SI-NEXT: v_or_b32_e32 v14, v28, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v11, 0xff, v5
-; SI-NEXT: v_and_b32_e32 v5, 0xff, v57
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v11, 0xff, v10
+; SI-NEXT: v_and_b32_e32 v10, 0xff, v57
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v19
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v14
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v25
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v19
; SI-NEXT: v_or_b32_e32 v14, v25, v14
+; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v50
-; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v39
+; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v31
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v17
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v40
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v2
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v14
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v25, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 28, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v4
-; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v23
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v17
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v17
; SI-NEXT: v_or_b32_e32 v14, v22, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 32, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v62
-; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v31
+; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v46
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v2
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v59
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v17
; SI-NEXT: v_or_b32_e32 v14, v22, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 36, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v3
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v34
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v17
@@ -163950,14 +164554,16 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 40, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v55
-; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v46
+; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v60
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v61
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v14
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v17
; SI-NEXT: v_or_b32_e32 v14, v19, v14
@@ -163966,24 +164572,24 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v16
-; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v43
+; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v37
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: v_and_b32_e32 v14, 0xff, v41
+; SI-NEXT: v_and_b32_e32 v14, 0xff, v51
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v54
+; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v43
; SI-NEXT: v_or_b32_e32 v14, v16, v14
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: v_or_b32_e32 v11, v11, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 48, v0
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v38
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v2
; SI-NEXT: v_or_b32_e32 v11, v11, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v16
@@ -163996,10 +164602,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v11, v14, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v13
-; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v20
+; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v34
; SI-NEXT: v_or_b32_e32 v11, v11, v13
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v13, 0xff, v13
@@ -164010,14 +164616,16 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v11, v11, v13
; SI-NEXT: v_add_i32_e32 v13, vcc, 56, v0
; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xff, v35
; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v15
; SI-NEXT: v_or_b32_e32 v11, v11, v13
-; SI-NEXT: v_and_b32_e32 v13, 0xff, v45
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v13, 0xff, v13
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14
; SI-NEXT: v_or_b32_e32 v13, v14, v13
@@ -164025,12 +164633,12 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_i32_e32 v13, vcc, 60, v0
; SI-NEXT: buffer_store_dword v11, v13, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; SI-NEXT: v_or_b32_e32 v10, v10, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13
@@ -164041,50 +164649,44 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v10, v10, v11
; SI-NEXT: v_add_i32_e32 v11, vcc, 64, v0
; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v8
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v10, 0xff, v30
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v2
; SI-NEXT: v_or_b32_e32 v10, v10, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v11, 0xff, v20
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v13
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v11, 0xff, v11
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v8
; SI-NEXT: v_or_b32_e32 v11, v13, v11
; SI-NEXT: v_or_b32_e32 v10, v10, v11
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x44, v0
; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v8
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v7, v7, v10
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v11
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v10
+; SI-NEXT: v_and_b32_e32 v10, 0xff, v8
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v8
; SI-NEXT: v_or_b32_e32 v10, v11, v10
; SI-NEXT: v_or_b32_e32 v7, v7, v10
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x48, v0
; SI-NEXT: buffer_store_dword v7, v10, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xff, v27
+; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v29
; SI-NEXT: v_or_b32_e32 v7, v7, v8
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_and_b32_e32 v8, 0xff, v8
+; SI-NEXT: v_and_b32_e32 v8, 0xff, v23
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v10
; SI-NEXT: v_or_b32_e32 v8, v10, v8
@@ -164092,45 +164694,48 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x4c, v0
; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7
; SI-NEXT: v_or_b32_e32 v6, v6, v7
-; SI-NEXT: v_and_b32_e32 v7, 0xff, v26
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
; SI-NEXT: v_or_b32_e32 v6, v6, v7
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x50, v0
; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v6, 0xff, v24
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v7
; SI-NEXT: v_or_b32_e32 v6, v6, v7
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v2
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
+; SI-NEXT: v_and_b32_e32 v7, 0xff, v44
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v8
+; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6
; SI-NEXT: v_or_b32_e32 v7, v8, v7
; SI-NEXT: v_or_b32_e32 v6, v6, v7
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x54, v0
; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v5, 0xff, v2
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; SI-NEXT: v_or_b32_e32 v5, v5, v6
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7
; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -164141,14 +164746,14 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x58, v0
; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xff, v21
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; SI-NEXT: v_or_b32_e32 v5, v5, v6
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v7
; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -164159,20 +164764,19 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x5c, v0
; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v4, 0xff, v2
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
; SI-NEXT: v_or_b32_e32 v4, v4, v5
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6
; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
-; SI-NEXT: v_and_b32_e32 v2, 0xff, v52
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
@@ -164180,10 +164784,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v4, v4, v5
; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0
; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 0xff, v18
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
; SI-NEXT: v_or_b32_e32 v4, v4, v5
@@ -164198,13 +164802,16 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v4, v4, v5
; SI-NEXT: v_add_i32_e32 v5, vcc, 0x64, v0
; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v2
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v4
; SI-NEXT: v_or_b32_e32 v3, v3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v5
; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -164217,10 +164824,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v3, 0xff, v63
-; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v42
+; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v41
; SI-NEXT: v_or_b32_e32 v3, v3, v4
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v4, 0xff, v4
@@ -164231,13 +164838,16 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v3, v3, v4
; SI-NEXT: v_add_i32_e32 v4, vcc, 0x6c, v0
; SI-NEXT: buffer_store_dword v3, v4, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -164248,29 +164858,31 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v2, 0xff, v12
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v40
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v4, 24, v4
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v4, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x74, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; SI-NEXT: v_or_b32_e32 v1, v1, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v3
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -164281,10 +164893,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xff, v9
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
@@ -164375,8 +164987,9 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_readfirstlane_b32 s6, v17
; VI-NEXT: v_readfirstlane_b32 s7, v18
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[46:47], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[46:47], vcc, exec
+; VI-NEXT: s_mov_b64 vcc, -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -165311,8 +165924,6 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: .LBB91_3:
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr71
; VI-NEXT: ; implicit-def: $sgpr69
; VI-NEXT: ; implicit-def: $sgpr70
@@ -165463,7 +166074,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB91_2
+; VI-NEXT: ; kill: killed $sgpr46
+; VI-NEXT: ; implicit-def: $sgpr46
+; VI-NEXT: s_andn2_b64 vcc, exec, vcc
+; VI-NEXT: s_cbranch_vccz .LBB91_2
; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v33, s71
; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
@@ -166233,8 +166847,9 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s46, v17
; GFX9-NEXT: v_readfirstlane_b32 s47, v18
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[78:79], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -166253,151 +166868,153 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s6, s5, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 18
+; GFX9-NEXT: v_writelane_b32 v62, s6, 20
; GFX9-NEXT: s_lshr_b32 s6, s5, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 17
-; GFX9-NEXT: s_lshr_b32 s6, s5, 8
; GFX9-NEXT: v_writelane_b32 v62, s6, 19
+; GFX9-NEXT: s_lshr_b32 s6, s5, 8
+; GFX9-NEXT: v_writelane_b32 v62, s6, 21
; GFX9-NEXT: s_lshr_b32 s6, s4, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 20
+; GFX9-NEXT: v_writelane_b32 v62, s6, 22
; GFX9-NEXT: s_lshr_b32 s6, s4, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 21
+; GFX9-NEXT: v_writelane_b32 v62, s6, 23
; GFX9-NEXT: s_lshr_b32 s6, s29, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 22
+; GFX9-NEXT: v_writelane_b32 v62, s6, 24
; GFX9-NEXT: s_lshr_b32 s6, s29, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 16
+; GFX9-NEXT: v_writelane_b32 v62, s6, 18
; GFX9-NEXT: s_lshr_b32 s6, s29, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 23
+; GFX9-NEXT: v_writelane_b32 v62, s6, 25
; GFX9-NEXT: s_lshr_b32 s6, s28, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 24
+; GFX9-NEXT: v_writelane_b32 v62, s6, 26
; GFX9-NEXT: s_lshr_b32 s6, s28, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 25
+; GFX9-NEXT: v_writelane_b32 v62, s6, 27
; GFX9-NEXT: s_lshr_b32 s6, s27, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 26
+; GFX9-NEXT: v_writelane_b32 v62, s6, 28
; GFX9-NEXT: s_lshr_b32 s6, s27, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 15
+; GFX9-NEXT: v_writelane_b32 v62, s6, 17
; GFX9-NEXT: s_lshr_b32 s6, s27, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 27
+; GFX9-NEXT: v_writelane_b32 v62, s6, 29
; GFX9-NEXT: s_lshr_b32 s6, s26, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 28
+; GFX9-NEXT: v_writelane_b32 v62, s6, 30
; GFX9-NEXT: s_lshr_b32 s6, s26, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 29
+; GFX9-NEXT: v_writelane_b32 v62, s6, 31
; GFX9-NEXT: s_lshr_b32 s6, s25, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 30
+; GFX9-NEXT: v_writelane_b32 v62, s6, 32
; GFX9-NEXT: s_lshr_b32 s6, s25, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 14
+; GFX9-NEXT: v_writelane_b32 v62, s6, 16
; GFX9-NEXT: s_lshr_b32 s6, s25, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 31
+; GFX9-NEXT: v_writelane_b32 v62, s6, 33
; GFX9-NEXT: s_lshr_b32 s6, s24, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 32
+; GFX9-NEXT: v_writelane_b32 v62, s6, 34
; GFX9-NEXT: s_lshr_b32 s6, s24, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 33
+; GFX9-NEXT: v_writelane_b32 v62, s6, 35
; GFX9-NEXT: s_lshr_b32 s6, s23, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 34
+; GFX9-NEXT: v_writelane_b32 v62, s6, 36
; GFX9-NEXT: s_lshr_b32 s6, s23, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 13
+; GFX9-NEXT: v_writelane_b32 v62, s6, 15
; GFX9-NEXT: s_lshr_b32 s6, s23, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 35
+; GFX9-NEXT: v_writelane_b32 v62, s6, 37
; GFX9-NEXT: s_lshr_b32 s6, s22, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 36
+; GFX9-NEXT: v_writelane_b32 v62, s6, 38
; GFX9-NEXT: s_lshr_b32 s6, s22, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 37
+; GFX9-NEXT: v_writelane_b32 v62, s6, 39
; GFX9-NEXT: s_lshr_b32 s6, s21, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 38
+; GFX9-NEXT: v_writelane_b32 v62, s6, 40
; GFX9-NEXT: s_lshr_b32 s6, s21, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 12
+; GFX9-NEXT: v_writelane_b32 v62, s6, 14
; GFX9-NEXT: s_lshr_b32 s6, s21, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 39
+; GFX9-NEXT: v_writelane_b32 v62, s6, 41
; GFX9-NEXT: s_lshr_b32 s6, s20, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 40
+; GFX9-NEXT: v_writelane_b32 v62, s6, 42
; GFX9-NEXT: s_lshr_b32 s6, s20, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 41
+; GFX9-NEXT: v_writelane_b32 v62, s6, 43
; GFX9-NEXT: s_lshr_b32 s6, s19, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 42
+; GFX9-NEXT: v_writelane_b32 v62, s6, 44
; GFX9-NEXT: s_lshr_b32 s6, s19, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 11
+; GFX9-NEXT: v_writelane_b32 v62, s6, 13
; GFX9-NEXT: s_lshr_b32 s6, s19, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 43
+; GFX9-NEXT: v_writelane_b32 v62, s6, 45
; GFX9-NEXT: s_lshr_b32 s6, s18, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 44
+; GFX9-NEXT: v_writelane_b32 v62, s6, 46
; GFX9-NEXT: s_lshr_b32 s6, s18, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 45
+; GFX9-NEXT: v_writelane_b32 v62, s6, 47
; GFX9-NEXT: s_lshr_b32 s6, s17, 24
-; GFX9-NEXT: v_writelane_b32 v62, s6, 46
+; GFX9-NEXT: v_writelane_b32 v62, s6, 48
; GFX9-NEXT: s_lshr_b32 s6, s17, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 10
+; GFX9-NEXT: v_writelane_b32 v62, s6, 12
; GFX9-NEXT: s_lshr_b32 s6, s17, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 47
+; GFX9-NEXT: v_writelane_b32 v62, s6, 49
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
-; GFX9-NEXT: v_writelane_b32 v62, s6, 48
+; GFX9-NEXT: v_writelane_b32 v62, s6, 50
; GFX9-NEXT: s_lshr_b32 s6, s16, 8
-; GFX9-NEXT: v_writelane_b32 v62, s6, 49
-; GFX9-NEXT: s_lshr_b64 s[40:41], s[4:5], 24
-; GFX9-NEXT: v_writelane_b32 v62, s40, 8
-; GFX9-NEXT: v_writelane_b32 v62, s41, 9
-; GFX9-NEXT: s_lshr_b64 s[40:41], s[28:29], 24
-; GFX9-NEXT: v_writelane_b32 v62, s40, 6
-; GFX9-NEXT: v_writelane_b32 v62, s41, 7
-; GFX9-NEXT: s_lshr_b64 s[40:41], s[26:27], 24
-; GFX9-NEXT: v_writelane_b32 v62, s40, 4
-; GFX9-NEXT: v_writelane_b32 v62, s41, 5
-; GFX9-NEXT: s_lshr_b64 s[40:41], s[24:25], 24
-; GFX9-NEXT: v_writelane_b32 v62, s40, 2
-; GFX9-NEXT: v_writelane_b32 v62, s41, 3
-; GFX9-NEXT: s_lshr_b64 s[40:41], s[22:23], 24
-; GFX9-NEXT: v_writelane_b32 v62, s40, 0
-; GFX9-NEXT: s_lshr_b32 s70, s47, 24
-; GFX9-NEXT: s_lshr_b32 s15, s47, 16
-; GFX9-NEXT: s_lshr_b32 s7, s47, 8
-; GFX9-NEXT: s_lshr_b32 s53, s46, 16
-; GFX9-NEXT: s_lshr_b32 s52, s46, 8
-; GFX9-NEXT: s_lshr_b32 s67, s57, 24
-; GFX9-NEXT: s_lshr_b32 s14, s57, 16
-; GFX9-NEXT: s_lshr_b32 s69, s57, 8
-; GFX9-NEXT: s_lshr_b32 s6, s56, 16
-; GFX9-NEXT: s_lshr_b32 s71, s56, 8
-; GFX9-NEXT: s_lshr_b32 s64, s59, 24
-; GFX9-NEXT: s_lshr_b32 s13, s59, 16
-; GFX9-NEXT: s_lshr_b32 s66, s59, 8
-; GFX9-NEXT: s_lshr_b32 s51, s58, 16
-; GFX9-NEXT: s_lshr_b32 s68, s58, 8
-; GFX9-NEXT: s_lshr_b32 s99, s61, 24
-; GFX9-NEXT: s_lshr_b32 s12, s61, 16
-; GFX9-NEXT: s_lshr_b32 s55, s61, 8
-; GFX9-NEXT: s_lshr_b32 s50, s60, 16
-; GFX9-NEXT: s_lshr_b32 s65, s60, 8
-; GFX9-NEXT: s_lshr_b32 s96, s63, 24
-; GFX9-NEXT: s_lshr_b32 s11, s63, 16
-; GFX9-NEXT: s_lshr_b32 s98, s63, 8
-; GFX9-NEXT: s_lshr_b32 s49, s62, 16
-; GFX9-NEXT: s_lshr_b32 s54, s62, 8
-; GFX9-NEXT: s_lshr_b32 s85, s73, 24
-; GFX9-NEXT: s_lshr_b32 s10, s73, 16
-; GFX9-NEXT: s_lshr_b32 s87, s73, 8
-; GFX9-NEXT: s_lshr_b32 s48, s72, 16
-; GFX9-NEXT: s_lshr_b32 s97, s72, 8
-; GFX9-NEXT: s_lshr_b32 s82, s75, 24
-; GFX9-NEXT: s_lshr_b32 s9, s75, 16
-; GFX9-NEXT: s_lshr_b32 s84, s75, 8
-; GFX9-NEXT: s_lshr_b32 s39, s74, 16
-; GFX9-NEXT: s_lshr_b32 s86, s74, 8
-; GFX9-NEXT: s_lshr_b32 s80, s77, 24
-; GFX9-NEXT: s_lshr_b32 s8, s77, 16
-; GFX9-NEXT: s_lshr_b32 s81, s77, 8
-; GFX9-NEXT: s_lshr_b32 s38, s76, 16
-; GFX9-NEXT: s_lshr_b32 s83, s76, 8
-; GFX9-NEXT: v_writelane_b32 v62, s41, 1
-; GFX9-NEXT: s_lshr_b64 s[40:41], s[20:21], 24
+; GFX9-NEXT: v_writelane_b32 v62, s6, 51
+; GFX9-NEXT: s_lshr_b64 s[42:43], s[4:5], 24
+; GFX9-NEXT: v_writelane_b32 v62, s42, 10
+; GFX9-NEXT: v_writelane_b32 v62, s43, 11
+; GFX9-NEXT: s_lshr_b64 s[42:43], s[28:29], 24
+; GFX9-NEXT: v_writelane_b32 v62, s42, 8
+; GFX9-NEXT: v_writelane_b32 v62, s43, 9
+; GFX9-NEXT: s_lshr_b64 s[42:43], s[26:27], 24
+; GFX9-NEXT: v_writelane_b32 v62, s42, 6
+; GFX9-NEXT: v_writelane_b32 v62, s43, 7
+; GFX9-NEXT: s_lshr_b64 s[42:43], s[24:25], 24
+; GFX9-NEXT: v_writelane_b32 v62, s42, 4
+; GFX9-NEXT: v_writelane_b32 v62, s43, 5
+; GFX9-NEXT: s_lshr_b64 s[42:43], s[22:23], 24
+; GFX9-NEXT: v_writelane_b32 v62, s42, 2
+; GFX9-NEXT: v_writelane_b32 v62, s43, 3
+; GFX9-NEXT: s_lshr_b64 s[42:43], s[20:21], 24
+; GFX9-NEXT: v_writelane_b32 v62, s42, 0
+; GFX9-NEXT: s_lshr_b32 s6, s47, 24
+; GFX9-NEXT: s_lshr_b32 s41, s47, 16
+; GFX9-NEXT: s_lshr_b32 s8, s47, 8
+; GFX9-NEXT: s_lshr_b32 s55, s46, 16
+; GFX9-NEXT: s_lshr_b32 s9, s46, 8
+; GFX9-NEXT: s_lshr_b32 s53, s57, 24
+; GFX9-NEXT: s_lshr_b32 s40, s57, 16
+; GFX9-NEXT: s_lshr_b32 s83, s57, 8
+; GFX9-NEXT: s_lshr_b32 s54, s56, 16
+; GFX9-NEXT: s_lshr_b32 s7, s56, 8
+; GFX9-NEXT: s_lshr_b32 s52, s59, 24
+; GFX9-NEXT: s_lshr_b32 s15, s59, 16
+; GFX9-NEXT: s_lshr_b32 s80, s59, 8
+; GFX9-NEXT: s_lshr_b32 s82, s58, 16
+; GFX9-NEXT: s_lshr_b32 s81, s58, 8
+; GFX9-NEXT: s_lshr_b32 s51, s61, 24
+; GFX9-NEXT: s_lshr_b32 s14, s61, 16
+; GFX9-NEXT: s_lshr_b32 s69, s61, 8
+; GFX9-NEXT: s_lshr_b32 s71, s60, 16
+; GFX9-NEXT: s_lshr_b32 s70, s60, 8
+; GFX9-NEXT: s_lshr_b32 s50, s63, 24
+; GFX9-NEXT: s_lshr_b32 s13, s63, 16
+; GFX9-NEXT: s_lshr_b32 s66, s63, 8
+; GFX9-NEXT: s_lshr_b32 s68, s62, 16
+; GFX9-NEXT: s_lshr_b32 s67, s62, 8
+; GFX9-NEXT: s_lshr_b32 s49, s73, 24
+; GFX9-NEXT: s_lshr_b32 s12, s73, 16
+; GFX9-NEXT: s_lshr_b32 s99, s73, 8
+; GFX9-NEXT: s_lshr_b32 s65, s72, 16
+; GFX9-NEXT: s_lshr_b32 s64, s72, 8
+; GFX9-NEXT: s_lshr_b32 s48, s75, 24
+; GFX9-NEXT: s_lshr_b32 s11, s75, 16
+; GFX9-NEXT: s_lshr_b32 s96, s75, 8
+; GFX9-NEXT: s_lshr_b32 s98, s74, 16
+; GFX9-NEXT: s_lshr_b32 s97, s74, 8
+; GFX9-NEXT: s_lshr_b32 s84, s77, 24
+; GFX9-NEXT: s_lshr_b32 s10, s77, 16
+; GFX9-NEXT: s_lshr_b32 s85, s77, 8
+; GFX9-NEXT: s_lshr_b32 s87, s76, 16
+; GFX9-NEXT: s_lshr_b32 s86, s76, 8
+; GFX9-NEXT: v_writelane_b32 v62, s43, 1
; GFX9-NEXT: s_lshr_b64 s[42:43], s[18:19], 24
; GFX9-NEXT: s_lshr_b64 s[44:45], s[16:17], 24
-; GFX9-NEXT: s_lshr_b64 s[78:79], s[46:47], 24
-; GFX9-NEXT: s_lshr_b64 s[88:89], s[56:57], 24
-; GFX9-NEXT: s_lshr_b64 s[90:91], s[58:59], 24
-; GFX9-NEXT: s_lshr_b64 s[92:93], s[60:61], 24
-; GFX9-NEXT: s_lshr_b64 s[94:95], s[62:63], 24
-; GFX9-NEXT: s_lshr_b64 s[30:31], s[72:73], 24
-; GFX9-NEXT: s_lshr_b64 s[34:35], s[74:75], 24
-; GFX9-NEXT: s_lshr_b64 s[36:37], s[76:77], 24
+; GFX9-NEXT: s_lshr_b64 s[88:89], s[46:47], 24
+; GFX9-NEXT: s_lshr_b64 s[90:91], s[56:57], 24
+; GFX9-NEXT: s_lshr_b64 s[92:93], s[58:59], 24
+; GFX9-NEXT: s_lshr_b64 s[94:95], s[60:61], 24
+; GFX9-NEXT: s_lshr_b64 s[30:31], s[62:63], 24
+; GFX9-NEXT: s_lshr_b64 s[34:35], s[72:73], 24
+; GFX9-NEXT: s_lshr_b64 s[36:37], s[74:75], 24
+; GFX9-NEXT: s_lshr_b64 s[38:39], s[76:77], 24
; GFX9-NEXT: s_cbranch_execnz .LBB91_4
; GFX9-NEXT: .LBB91_2: ; %cmp.true
; GFX9-NEXT: s_and_b32 s6, s77, 0xffff0000
@@ -166422,7 +167039,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX9-NEXT: s_and_b32 s6, s76, 0xffff0000
-; GFX9-NEXT: v_lshl_or_b32 v8, v5, 16, v2
+; GFX9-NEXT: v_lshl_or_b32 v9, v5, 16, v2
; GFX9-NEXT: v_add_f32_e32 v2, s6, v1
; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX9-NEXT: v_add_u32_e32 v3, v3, v2
@@ -166444,7 +167061,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX9-NEXT: s_and_b32 s6, s75, 0xffff0000
-; GFX9-NEXT: v_lshl_or_b32 v7, v2, 16, v3
+; GFX9-NEXT: v_lshl_or_b32 v8, v2, 16, v3
; GFX9-NEXT: v_add_f32_e32 v2, s6, v1
; GFX9-NEXT: v_bfe_u32 v3, v2, 16, 1
; GFX9-NEXT: v_add_u32_e32 v3, v3, v2
@@ -166960,7 +167577,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_cselect_b32 s6, s9, s8
-; GFX9-NEXT: s_lshr_b32 s76, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
; GFX9-NEXT: s_lshl_b32 s6, s27, 16
; GFX9-NEXT: v_add_f32_e32 v2, s6, v1
; GFX9-NEXT: v_readfirstlane_b32 s6, v2
@@ -167005,7 +167622,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v2, v2
; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_cselect_b32 s6, s9, s8
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s6, 16
; GFX9-NEXT: s_lshl_b32 s6, s29, 16
; GFX9-NEXT: v_add_f32_e32 v2, s6, v1
; GFX9-NEXT: v_readfirstlane_b32 s6, v2
@@ -167053,7 +167670,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_lshl_b32 s5, s5, 16
; GFX9-NEXT: v_add_f32_e32 v2, s5, v1
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
-; GFX9-NEXT: s_lshr_b32 s78, s6, 16
+; GFX9-NEXT: s_lshr_b32 s76, s6, 16
; GFX9-NEXT: s_bfe_u32 s6, s5, 0x10010
; GFX9-NEXT: s_add_i32 s6, s6, s5
; GFX9-NEXT: s_add_i32 s8, s6, 0x7fff
@@ -167077,71 +167694,71 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
; GFX9-NEXT: s_bfe_u32 s8, s4, 0x10010
; GFX9-NEXT: s_add_i32 s8, s8, s4
+; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
+; GFX9-NEXT: v_lshrrev_b64 v[1:2], 24, v[25:26]
; GFX9-NEXT: s_lshr_b32 s6, s6, 16
; GFX9-NEXT: s_add_i32 s10, s8, 0x7fff
; GFX9-NEXT: s_bitset1_b32 s4, 22
-; GFX9-NEXT: v_cmp_u_f32_e32 vcc, v1, v1
-; GFX9-NEXT: v_lshrrev_b64 v[1:2], 24, v[25:26]
-; GFX9-NEXT: s_and_b64 s[8:9], vcc, exec
; GFX9-NEXT: v_lshrrev_b64 v[2:3], 24, v[23:24]
-; GFX9-NEXT: s_cselect_b32 s4, s4, s10
+; GFX9-NEXT: s_and_b64 s[8:9], vcc, exec
; GFX9-NEXT: v_lshrrev_b64 v[3:4], 24, v[21:22]
-; GFX9-NEXT: v_lshrrev_b64 v[9:10], 24, v[15:16]
+; GFX9-NEXT: s_cselect_b32 s4, s4, s10
+; GFX9-NEXT: v_lshrrev_b64 v[4:5], 24, v[19:20]
; GFX9-NEXT: s_pack_ll_b32_b16 s47, s17, s11
; GFX9-NEXT: s_pack_ll_b32_b16 s57, s19, s12
-; GFX9-NEXT: s_pack_ll_b32_b16 s59, s21, s13
; GFX9-NEXT: s_lshr_b32 s4, s4, 16
-; GFX9-NEXT: v_lshrrev_b64 v[4:5], 24, v[19:20]
+; GFX9-NEXT: v_lshrrev_b64 v[5:6], 24, v[17:18]
; GFX9-NEXT: v_lshrrev_b64 v[10:11], 24, v[13:14]
+; GFX9-NEXT: s_pack_ll_b32_b16 s59, s21, s13
; GFX9-NEXT: s_pack_ll_b32_b16 s61, s23, s14
; GFX9-NEXT: s_pack_ll_b32_b16 s63, s25, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s73, s27, s76
-; GFX9-NEXT: s_pack_ll_b32_b16 s75, s29, s77
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s5, s78
+; GFX9-NEXT: s_pack_ll_b32_b16 s73, s27, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s75, s29, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s5, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s4, s6
-; GFX9-NEXT: s_lshr_b64 s[40:41], s[58:59], 24
; GFX9-NEXT: s_lshr_b64 s[42:43], s[56:57], 24
; GFX9-NEXT: s_lshr_b64 s[44:45], s[46:47], 24
-; GFX9-NEXT: v_lshrrev_b64 v[5:6], 24, v[17:18]
-; GFX9-NEXT: v_lshrrev_b64 v[11:12], 24, v[7:8]
-; GFX9-NEXT: s_lshr_b64 s[34:35], s[6:7], 24
-; GFX9-NEXT: s_lshr_b64 s[36:37], s[74:75], 24
-; GFX9-NEXT: s_lshr_b64 s[38:39], s[72:73], 24
-; GFX9-NEXT: s_lshr_b64 s[48:49], s[62:63], 24
-; GFX9-NEXT: s_lshr_b64 s[50:51], s[60:61], 24
+; GFX9-NEXT: v_lshrrev_b64 v[6:7], 24, v[15:16]
+; GFX9-NEXT: v_lshrrev_b64 v[11:12], 24, v[8:9]
+; GFX9-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
+; GFX9-NEXT: s_lshr_b64 s[34:35], s[74:75], 24
+; GFX9-NEXT: s_lshr_b64 s[36:37], s[72:73], 24
+; GFX9-NEXT: s_lshr_b64 s[38:39], s[62:63], 24
+; GFX9-NEXT: s_lshr_b64 s[48:49], s[60:61], 24
+; GFX9-NEXT: s_lshr_b64 s[50:51], s[58:59], 24
; GFX9-NEXT: s_lshr_b32 s9, s7, 24
; GFX9-NEXT: s_lshr_b32 s10, s7, 8
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
-; GFX9-NEXT: s_lshr_b32 s43, s6, 8
-; GFX9-NEXT: s_lshr_b32 s45, s75, 24
+; GFX9-NEXT: s_lshr_b32 s43, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s6, 8
+; GFX9-NEXT: s_lshr_b32 s77, s75, 24
; GFX9-NEXT: s_lshr_b32 s75, s75, 8
-; GFX9-NEXT: s_lshr_b32 s79, s74, 16
+; GFX9-NEXT: s_lshr_b32 s78, s74, 16
; GFX9-NEXT: s_lshr_b32 s74, s74, 8
-; GFX9-NEXT: s_lshr_b32 s88, s73, 24
+; GFX9-NEXT: s_lshr_b32 s79, s73, 24
; GFX9-NEXT: s_lshr_b32 s73, s73, 8
-; GFX9-NEXT: s_lshr_b32 s89, s72, 16
+; GFX9-NEXT: s_lshr_b32 s88, s72, 16
; GFX9-NEXT: s_lshr_b32 s72, s72, 8
-; GFX9-NEXT: s_lshr_b32 s90, s63, 24
+; GFX9-NEXT: s_lshr_b32 s89, s63, 24
; GFX9-NEXT: s_lshr_b32 s63, s63, 8
-; GFX9-NEXT: s_lshr_b32 s91, s62, 16
+; GFX9-NEXT: s_lshr_b32 s90, s62, 16
; GFX9-NEXT: s_lshr_b32 s62, s62, 8
-; GFX9-NEXT: s_lshr_b32 s92, s61, 24
+; GFX9-NEXT: s_lshr_b32 s91, s61, 24
; GFX9-NEXT: s_lshr_b32 s61, s61, 8
-; GFX9-NEXT: s_lshr_b32 s93, s60, 16
+; GFX9-NEXT: s_lshr_b32 s92, s60, 16
; GFX9-NEXT: s_lshr_b32 s60, s60, 8
-; GFX9-NEXT: s_lshr_b32 s94, s59, 24
+; GFX9-NEXT: s_lshr_b32 s93, s59, 24
; GFX9-NEXT: s_lshr_b32 s59, s59, 8
-; GFX9-NEXT: s_lshr_b32 s95, s58, 16
+; GFX9-NEXT: s_lshr_b32 s94, s58, 16
; GFX9-NEXT: s_lshr_b32 s58, s58, 8
-; GFX9-NEXT: s_lshr_b32 vcc_lo, s57, 24
+; GFX9-NEXT: s_lshr_b32 s95, s57, 24
; GFX9-NEXT: s_lshr_b32 s57, s57, 8
-; GFX9-NEXT: s_lshr_b32 vcc_hi, s56, 16
+; GFX9-NEXT: s_lshr_b32 vcc_lo, s56, 16
; GFX9-NEXT: s_lshr_b32 s56, s56, 8
-; GFX9-NEXT: s_lshr_b32 s30, s47, 24
+; GFX9-NEXT: s_lshr_b32 vcc_hi, s47, 24
; GFX9-NEXT: s_lshr_b32 s47, s47, 8
; GFX9-NEXT: s_lshr_b32 s8, s46, 16
; GFX9-NEXT: s_lshr_b32 s7, s46, 8
-; GFX9-NEXT: v_lshrrev_b32_e32 v6, 24, v26
+; GFX9-NEXT: v_lshrrev_b32_e32 v7, 24, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v12, 8, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 8, v25
@@ -167169,115 +167786,117 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 8, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v13, 8, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v28, 24, v8
+; GFX9-NEXT: v_lshrrev_b32_e32 v28, 24, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v9, 8, v9
+; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v8, 8, v8
-; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v7
-; GFX9-NEXT: v_lshrrev_b32_e32 v7, 8, v7
; GFX9-NEXT: s_branch .LBB91_5
; GFX9-NEXT: .LBB91_3:
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s78, 0
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s79, 1
-; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr78
-; GFX9-NEXT: ; implicit-def: $sgpr83
-; GFX9-NEXT: ; implicit-def: $sgpr38
-; GFX9-NEXT: ; implicit-def: $sgpr81
-; GFX9-NEXT: ; implicit-def: $sgpr8
-; GFX9-NEXT: ; implicit-def: $sgpr80
+; GFX9-NEXT: ; implicit-def: $sgpr88
; GFX9-NEXT: ; implicit-def: $sgpr86
-; GFX9-NEXT: ; implicit-def: $sgpr39
-; GFX9-NEXT: ; implicit-def: $sgpr84
-; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: ; implicit-def: $sgpr82
-; GFX9-NEXT: ; implicit-def: $sgpr97
-; GFX9-NEXT: ; implicit-def: $sgpr48
; GFX9-NEXT: ; implicit-def: $sgpr87
-; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr85
-; GFX9-NEXT: ; implicit-def: $sgpr54
-; GFX9-NEXT: ; implicit-def: $sgpr49
+; GFX9-NEXT: ; implicit-def: $sgpr10
+; GFX9-NEXT: ; implicit-def: $sgpr84
+; GFX9-NEXT: ; implicit-def: $sgpr97
; GFX9-NEXT: ; implicit-def: $sgpr98
-; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr96
+; GFX9-NEXT: ; implicit-def: $sgpr11
+; GFX9-NEXT: ; implicit-def: $sgpr48
+; GFX9-NEXT: ; implicit-def: $sgpr64
; GFX9-NEXT: ; implicit-def: $sgpr65
-; GFX9-NEXT: ; implicit-def: $sgpr50
-; GFX9-NEXT: ; implicit-def: $sgpr55
-; GFX9-NEXT: ; implicit-def: $sgpr12
; GFX9-NEXT: ; implicit-def: $sgpr99
+; GFX9-NEXT: ; implicit-def: $sgpr12
+; GFX9-NEXT: ; implicit-def: $sgpr49
+; GFX9-NEXT: ; implicit-def: $sgpr67
; GFX9-NEXT: ; implicit-def: $sgpr68
-; GFX9-NEXT: ; implicit-def: $sgpr51
; GFX9-NEXT: ; implicit-def: $sgpr66
; GFX9-NEXT: ; implicit-def: $sgpr13
-; GFX9-NEXT: ; implicit-def: $sgpr64
+; GFX9-NEXT: ; implicit-def: $sgpr50
+; GFX9-NEXT: ; implicit-def: $sgpr70
; GFX9-NEXT: ; implicit-def: $sgpr71
; GFX9-NEXT: ; implicit-def: $sgpr69
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: ; implicit-def: $sgpr67
+; GFX9-NEXT: ; implicit-def: $sgpr51
+; GFX9-NEXT: ; implicit-def: $sgpr81
+; GFX9-NEXT: ; implicit-def: $sgpr82
+; GFX9-NEXT: ; implicit-def: $sgpr80
+; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr52
-; GFX9-NEXT: ; implicit-def: $sgpr53
; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: ; implicit-def: $sgpr70
+; GFX9-NEXT: ; implicit-def: $sgpr54
+; GFX9-NEXT: ; implicit-def: $sgpr83
+; GFX9-NEXT: ; implicit-def: $sgpr40
+; GFX9-NEXT: ; implicit-def: $sgpr53
+; GFX9-NEXT: ; implicit-def: $sgpr9
+; GFX9-NEXT: ; implicit-def: $sgpr55
+; GFX9-NEXT: ; implicit-def: $sgpr8
+; GFX9-NEXT: ; implicit-def: $sgpr41
; GFX9-NEXT: ; implicit-def: $sgpr44
; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: ; implicit-def: $sgpr40
+; GFX9-NEXT: ; implicit-def: $sgpr38
; GFX9-NEXT: ; implicit-def: $sgpr36
; GFX9-NEXT: ; implicit-def: $sgpr34
; GFX9-NEXT: ; implicit-def: $sgpr30
; GFX9-NEXT: ; implicit-def: $sgpr94
; GFX9-NEXT: ; implicit-def: $sgpr92
; GFX9-NEXT: ; implicit-def: $sgpr90
+; GFX9-NEXT: ; implicit-def: $sgpr6
+; GFX9-NEXT: ; kill: killed $sgpr6
+; GFX9-NEXT: v_writelane_b32 v62, s88, 0
+; GFX9-NEXT: ; implicit-def: $sgpr6
+; GFX9-NEXT: ; kill: killed $sgpr6
+; GFX9-NEXT: v_writelane_b32 v62, s89, 1
+; GFX9-NEXT: ; implicit-def: $sgpr6
+; GFX9-NEXT: ; kill: killed $sgpr6
; GFX9-NEXT: ; implicit-def: $sgpr88
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s78, 2
+; GFX9-NEXT: v_writelane_b32 v62, s88, 2
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s79, 3
+; GFX9-NEXT: v_writelane_b32 v62, s89, 3
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr78
+; GFX9-NEXT: ; implicit-def: $sgpr88
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s78, 4
+; GFX9-NEXT: v_writelane_b32 v62, s88, 4
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s79, 5
+; GFX9-NEXT: v_writelane_b32 v62, s89, 5
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr78
+; GFX9-NEXT: ; implicit-def: $sgpr88
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s78, 6
+; GFX9-NEXT: v_writelane_b32 v62, s88, 6
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s79, 7
+; GFX9-NEXT: v_writelane_b32 v62, s89, 7
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr78
+; GFX9-NEXT: ; implicit-def: $sgpr88
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s78, 8
+; GFX9-NEXT: v_writelane_b32 v62, s88, 8
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: v_writelane_b32 v62, s79, 9
+; GFX9-NEXT: v_writelane_b32 v62, s89, 9
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
-; GFX9-NEXT: ; implicit-def: $sgpr78
+; GFX9-NEXT: ; implicit-def: $sgpr88
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
+; GFX9-NEXT: v_writelane_b32 v62, s88, 10
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
+; GFX9-NEXT: v_writelane_b32 v62, s89, 11
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
+; GFX9-NEXT: ; implicit-def: $sgpr88
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
; GFX9-NEXT: ; implicit-def: $sgpr6
@@ -167321,23 +167940,25 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; kill: killed $sgpr6
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB91_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[78:79]
+; GFX9-NEXT: s_cbranch_vccz .LBB91_2
; GFX9-NEXT: .LBB91_4:
-; GFX9-NEXT: v_mov_b32_e32 v1, s9
+; GFX9-NEXT: v_mov_b32_e32 v1, s11
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v1, s76
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v1, s77
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v1, s8
-; GFX9-NEXT: v_mov_b32_e32 v46, s51
-; GFX9-NEXT: v_mov_b32_e32 v56, s50
-; GFX9-NEXT: v_mov_b32_e32 v58, s49
-; GFX9-NEXT: v_mov_b32_e32 v60, s48
-; GFX9-NEXT: v_mov_b32_e32 v27, s39
-; GFX9-NEXT: v_mov_b32_e32 v29, s38
-; GFX9-NEXT: v_mov_b32_e32 v10, s34
-; GFX9-NEXT: v_mov_b32_e32 v11, s36
+; GFX9-NEXT: v_mov_b32_e32 v1, s10
+; GFX9-NEXT: v_mov_b32_e32 v47, s51
+; GFX9-NEXT: v_mov_b32_e32 v57, s50
+; GFX9-NEXT: v_mov_b32_e32 v59, s49
+; GFX9-NEXT: v_mov_b32_e32 v61, s48
+; GFX9-NEXT: v_mov_b32_e32 v5, s30
+; GFX9-NEXT: v_mov_b32_e32 v6, s34
+; GFX9-NEXT: v_mov_b32_e32 v10, s36
+; GFX9-NEXT: v_mov_b32_e32 v11, s38
+; GFX9-NEXT: v_readlane_b32 s30, v62, 10
; GFX9-NEXT: v_readlane_b32 s34, v62, 8
; GFX9-NEXT: v_readlane_b32 s36, v62, 6
; GFX9-NEXT: v_readlane_b32 s38, v62, 4
@@ -167345,97 +167966,98 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_readlane_b32 s50, v62, 0
; GFX9-NEXT: v_mov_b32_e32 v42, s46
; GFX9-NEXT: v_mov_b32_e32 v41, s47
-; GFX9-NEXT: v_mov_b32_e32 v55, s15
+; GFX9-NEXT: v_mov_b32_e32 v55, s41
; GFX9-NEXT: v_mov_b32_e32 v40, s56
; GFX9-NEXT: v_mov_b32_e32 v54, s57
-; GFX9-NEXT: v_mov_b32_e32 v52, s14
+; GFX9-NEXT: v_mov_b32_e32 v52, s40
; GFX9-NEXT: v_mov_b32_e32 v53, s58
; GFX9-NEXT: v_mov_b32_e32 v51, s59
-; GFX9-NEXT: v_mov_b32_e32 v49, s13
+; GFX9-NEXT: v_mov_b32_e32 v49, s15
; GFX9-NEXT: v_mov_b32_e32 v50, s60
; GFX9-NEXT: v_mov_b32_e32 v48, s61
-; GFX9-NEXT: v_mov_b32_e32 v38, s12
+; GFX9-NEXT: v_mov_b32_e32 v38, s14
; GFX9-NEXT: v_mov_b32_e32 v39, s62
; GFX9-NEXT: v_mov_b32_e32 v37, s63
-; GFX9-NEXT: v_mov_b32_e32 v35, s11
+; GFX9-NEXT: v_mov_b32_e32 v35, s13
; GFX9-NEXT: v_mov_b32_e32 v36, s72
; GFX9-NEXT: v_mov_b32_e32 v34, s73
-; GFX9-NEXT: v_mov_b32_e32 v32, s10
+; GFX9-NEXT: v_mov_b32_e32 v32, s12
; GFX9-NEXT: v_mov_b32_e32 v33, s74
; GFX9-NEXT: v_mov_b32_e32 v31, s75
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
-; GFX9-NEXT: v_mov_b32_e32 v26, s53
-; GFX9-NEXT: v_mov_b32_e32 v25, s52
-; GFX9-NEXT: v_mov_b32_e32 v6, s70
-; GFX9-NEXT: v_mov_b32_e32 v12, s7
-; GFX9-NEXT: v_mov_b32_e32 v44, s6
-; GFX9-NEXT: v_mov_b32_e32 v23, s71
-; GFX9-NEXT: v_mov_b32_e32 v43, s67
-; GFX9-NEXT: v_mov_b32_e32 v24, s69
-; GFX9-NEXT: v_mov_b32_e32 v21, s68
-; GFX9-NEXT: v_mov_b32_e32 v45, s64
-; GFX9-NEXT: v_mov_b32_e32 v22, s66
-; GFX9-NEXT: v_mov_b32_e32 v19, s65
-; GFX9-NEXT: v_mov_b32_e32 v47, s99
-; GFX9-NEXT: v_mov_b32_e32 v20, s55
-; GFX9-NEXT: v_mov_b32_e32 v17, s54
-; GFX9-NEXT: v_mov_b32_e32 v57, s96
-; GFX9-NEXT: v_mov_b32_e32 v18, s98
-; GFX9-NEXT: v_mov_b32_e32 v15, s97
-; GFX9-NEXT: v_mov_b32_e32 v59, s85
-; GFX9-NEXT: v_mov_b32_e32 v16, s87
-; GFX9-NEXT: v_mov_b32_e32 v13, s86
-; GFX9-NEXT: v_mov_b32_e32 v61, s82
-; GFX9-NEXT: v_mov_b32_e32 v14, s84
-; GFX9-NEXT: v_mov_b32_e32 v7, s83
-; GFX9-NEXT: v_mov_b32_e32 v28, s80
-; GFX9-NEXT: v_mov_b32_e32 v8, s81
-; GFX9-NEXT: v_mov_b32_e32 v1, s78
-; GFX9-NEXT: v_mov_b32_e32 v2, s88
-; GFX9-NEXT: v_mov_b32_e32 v3, s90
-; GFX9-NEXT: v_mov_b32_e32 v4, s92
-; GFX9-NEXT: v_mov_b32_e32 v5, s94
-; GFX9-NEXT: v_mov_b32_e32 v9, s30
-; GFX9-NEXT: v_readlane_b32 s11, v62, 10
-; GFX9-NEXT: v_readlane_b32 s12, v62, 11
-; GFX9-NEXT: v_readlane_b32 s13, v62, 12
-; GFX9-NEXT: v_readlane_b32 s14, v62, 13
-; GFX9-NEXT: v_readlane_b32 s15, v62, 14
-; GFX9-NEXT: v_readlane_b32 s76, v62, 15
-; GFX9-NEXT: v_readlane_b32 s77, v62, 16
-; GFX9-NEXT: v_readlane_b32 s78, v62, 17
-; GFX9-NEXT: v_readlane_b32 s9, v62, 18
-; GFX9-NEXT: v_readlane_b32 s10, v62, 19
-; GFX9-NEXT: v_readlane_b32 s41, v62, 20
-; GFX9-NEXT: v_readlane_b32 s43, v62, 21
-; GFX9-NEXT: v_readlane_b32 s45, v62, 22
-; GFX9-NEXT: v_readlane_b32 s75, v62, 23
-; GFX9-NEXT: v_readlane_b32 s79, v62, 24
-; GFX9-NEXT: v_readlane_b32 s74, v62, 25
-; GFX9-NEXT: v_readlane_b32 s88, v62, 26
-; GFX9-NEXT: v_readlane_b32 s73, v62, 27
-; GFX9-NEXT: v_readlane_b32 s89, v62, 28
-; GFX9-NEXT: v_readlane_b32 s72, v62, 29
-; GFX9-NEXT: v_readlane_b32 s90, v62, 30
-; GFX9-NEXT: v_readlane_b32 s63, v62, 31
-; GFX9-NEXT: v_readlane_b32 s91, v62, 32
-; GFX9-NEXT: v_readlane_b32 s62, v62, 33
-; GFX9-NEXT: v_readlane_b32 s92, v62, 34
-; GFX9-NEXT: v_readlane_b32 s61, v62, 35
-; GFX9-NEXT: v_readlane_b32 s93, v62, 36
-; GFX9-NEXT: v_readlane_b32 s60, v62, 37
-; GFX9-NEXT: v_readlane_b32 s94, v62, 38
-; GFX9-NEXT: v_readlane_b32 s59, v62, 39
-; GFX9-NEXT: v_readlane_b32 s95, v62, 40
-; GFX9-NEXT: v_readlane_b32 s58, v62, 41
-; GFX9-NEXT: v_readlane_b32 vcc_lo, v62, 42
-; GFX9-NEXT: v_readlane_b32 s57, v62, 43
-; GFX9-NEXT: v_readlane_b32 vcc_hi, v62, 44
-; GFX9-NEXT: v_readlane_b32 s56, v62, 45
-; GFX9-NEXT: v_readlane_b32 s30, v62, 46
-; GFX9-NEXT: v_readlane_b32 s47, v62, 47
-; GFX9-NEXT: v_readlane_b32 s8, v62, 48
-; GFX9-NEXT: v_readlane_b32 s7, v62, 49
+; GFX9-NEXT: v_mov_b32_e32 v26, s55
+; GFX9-NEXT: v_mov_b32_e32 v25, s9
+; GFX9-NEXT: v_mov_b32_e32 v7, s6
+; GFX9-NEXT: v_mov_b32_e32 v12, s8
+; GFX9-NEXT: v_mov_b32_e32 v44, s54
+; GFX9-NEXT: v_mov_b32_e32 v23, s7
+; GFX9-NEXT: v_mov_b32_e32 v43, s53
+; GFX9-NEXT: v_mov_b32_e32 v24, s83
+; GFX9-NEXT: v_mov_b32_e32 v46, s82
+; GFX9-NEXT: v_mov_b32_e32 v21, s81
+; GFX9-NEXT: v_mov_b32_e32 v45, s52
+; GFX9-NEXT: v_mov_b32_e32 v22, s80
+; GFX9-NEXT: v_mov_b32_e32 v56, s71
+; GFX9-NEXT: v_mov_b32_e32 v19, s70
+; GFX9-NEXT: v_mov_b32_e32 v20, s69
+; GFX9-NEXT: v_mov_b32_e32 v58, s68
+; GFX9-NEXT: v_mov_b32_e32 v17, s67
+; GFX9-NEXT: v_mov_b32_e32 v18, s66
+; GFX9-NEXT: v_mov_b32_e32 v60, s65
+; GFX9-NEXT: v_mov_b32_e32 v15, s64
+; GFX9-NEXT: v_mov_b32_e32 v16, s99
+; GFX9-NEXT: v_mov_b32_e32 v27, s98
+; GFX9-NEXT: v_mov_b32_e32 v13, s97
+; GFX9-NEXT: v_mov_b32_e32 v14, s96
+; GFX9-NEXT: v_mov_b32_e32 v29, s87
+; GFX9-NEXT: v_mov_b32_e32 v8, s86
+; GFX9-NEXT: v_mov_b32_e32 v28, s84
+; GFX9-NEXT: v_mov_b32_e32 v9, s85
+; GFX9-NEXT: v_mov_b32_e32 v1, s88
+; GFX9-NEXT: v_mov_b32_e32 v2, s90
+; GFX9-NEXT: v_mov_b32_e32 v3, s92
+; GFX9-NEXT: v_mov_b32_e32 v4, s94
+; GFX9-NEXT: v_readlane_b32 s11, v62, 12
+; GFX9-NEXT: v_readlane_b32 s12, v62, 13
+; GFX9-NEXT: v_readlane_b32 s13, v62, 14
+; GFX9-NEXT: v_readlane_b32 s14, v62, 15
+; GFX9-NEXT: v_readlane_b32 s15, v62, 16
+; GFX9-NEXT: v_readlane_b32 s40, v62, 17
+; GFX9-NEXT: v_readlane_b32 s41, v62, 18
+; GFX9-NEXT: v_readlane_b32 s76, v62, 19
+; GFX9-NEXT: v_readlane_b32 s9, v62, 20
+; GFX9-NEXT: v_readlane_b32 s10, v62, 21
+; GFX9-NEXT: v_readlane_b32 s43, v62, 22
+; GFX9-NEXT: v_readlane_b32 s45, v62, 23
+; GFX9-NEXT: v_readlane_b32 s77, v62, 24
+; GFX9-NEXT: v_readlane_b32 s75, v62, 25
+; GFX9-NEXT: v_readlane_b32 s78, v62, 26
+; GFX9-NEXT: v_readlane_b32 s74, v62, 27
+; GFX9-NEXT: v_readlane_b32 s79, v62, 28
+; GFX9-NEXT: v_readlane_b32 s73, v62, 29
+; GFX9-NEXT: v_readlane_b32 s88, v62, 30
+; GFX9-NEXT: v_readlane_b32 s72, v62, 31
+; GFX9-NEXT: v_readlane_b32 s89, v62, 32
+; GFX9-NEXT: v_readlane_b32 s63, v62, 33
+; GFX9-NEXT: v_readlane_b32 s90, v62, 34
+; GFX9-NEXT: v_readlane_b32 s62, v62, 35
+; GFX9-NEXT: v_readlane_b32 s91, v62, 36
+; GFX9-NEXT: v_readlane_b32 s61, v62, 37
+; GFX9-NEXT: v_readlane_b32 s92, v62, 38
+; GFX9-NEXT: v_readlane_b32 s60, v62, 39
+; GFX9-NEXT: v_readlane_b32 s93, v62, 40
+; GFX9-NEXT: v_readlane_b32 s59, v62, 41
+; GFX9-NEXT: v_readlane_b32 s94, v62, 42
+; GFX9-NEXT: v_readlane_b32 s58, v62, 43
+; GFX9-NEXT: v_readlane_b32 s95, v62, 44
+; GFX9-NEXT: v_readlane_b32 s57, v62, 45
+; GFX9-NEXT: v_readlane_b32 vcc_lo, v62, 46
+; GFX9-NEXT: v_readlane_b32 s56, v62, 47
+; GFX9-NEXT: v_readlane_b32 vcc_hi, v62, 48
+; GFX9-NEXT: v_readlane_b32 s47, v62, 49
+; GFX9-NEXT: v_readlane_b32 s8, v62, 50
+; GFX9-NEXT: v_readlane_b32 s7, v62, 51
+; GFX9-NEXT: v_readlane_b32 s31, v62, 11
; GFX9-NEXT: v_readlane_b32 s35, v62, 9
; GFX9-NEXT: v_readlane_b32 s37, v62, 7
; GFX9-NEXT: v_readlane_b32 s39, v62, 5
@@ -167456,7 +168078,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_lshl_b32 s7, s47, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s11, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s30, 8
+; GFX9-NEXT: s_lshl_b32 s8, vcc_hi, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167466,7 +168088,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s56, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, vcc_hi, 0xff
+; GFX9-NEXT: s_and_b32 s7, vcc_lo, 0xff
; GFX9-NEXT: s_lshl_b32 s8, s42, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
@@ -167478,7 +168100,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_lshl_b32 s7, s57, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s12, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, vcc_lo, 8
+; GFX9-NEXT: s_lshl_b32 s8, s95, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167488,8 +168110,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s58, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s95, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s40, 8
+; GFX9-NEXT: s_and_b32 s7, s94, 0xff
+; GFX9-NEXT: s_lshl_b32 s8, s50, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167500,7 +168122,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_lshl_b32 s7, s59, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s13, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s94, 8
+; GFX9-NEXT: s_lshl_b32 s8, s93, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167510,8 +168132,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s22, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s60, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s93, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s50, 8
+; GFX9-NEXT: s_and_b32 s7, s92, 0xff
+; GFX9-NEXT: s_lshl_b32 s8, s48, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167522,7 +168144,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_lshl_b32 s7, s61, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s14, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s92, 8
+; GFX9-NEXT: s_lshl_b32 s8, s91, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167532,8 +168154,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s24, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s62, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s91, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s48, 8
+; GFX9-NEXT: s_and_b32 s7, s90, 0xff
+; GFX9-NEXT: s_lshl_b32 s8, s38, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167544,7 +168166,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_lshl_b32 s7, s63, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s15, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s90, 8
+; GFX9-NEXT: s_lshl_b32 s8, s89, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167554,8 +168176,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s26, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s72, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s89, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s38, 8
+; GFX9-NEXT: s_and_b32 s7, s88, 0xff
+; GFX9-NEXT: s_lshl_b32 s8, s36, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167565,8 +168187,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s27, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s73, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s76, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s88, 8
+; GFX9-NEXT: s_and_b32 s7, s40, 0xff
+; GFX9-NEXT: s_lshl_b32 s8, s79, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167576,8 +168198,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s74, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s79, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s36, 8
+; GFX9-NEXT: s_and_b32 s7, s78, 0xff
+; GFX9-NEXT: s_lshl_b32 s8, s34, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167587,8 +168209,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s6, s29, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s75, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s77, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s45, 8
+; GFX9-NEXT: s_and_b32 s7, s41, 0xff
+; GFX9-NEXT: s_lshl_b32 s8, s77, 8
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
@@ -167596,10 +168218,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen offset:48
; GFX9-NEXT: v_mov_b32_e32 v30, s6
; GFX9-NEXT: s_and_b32 s4, s4, 0xff
-; GFX9-NEXT: s_lshl_b32 s6, s43, 8
+; GFX9-NEXT: s_lshl_b32 s6, s45, 8
; GFX9-NEXT: s_or_b32 s4, s4, s6
-; GFX9-NEXT: s_and_b32 s6, s41, 0xff
-; GFX9-NEXT: s_lshl_b32 s7, s34, 8
+; GFX9-NEXT: s_and_b32 s6, s43, 0xff
+; GFX9-NEXT: s_lshl_b32 s7, s30, 8
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
; GFX9-NEXT: s_lshl_b32 s6, s6, 16
@@ -167609,7 +168231,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_and_b32 s4, s5, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s10, 8
; GFX9-NEXT: s_or_b32 s4, s4, s5
-; GFX9-NEXT: s_and_b32 s5, s78, 0xff
+; GFX9-NEXT: s_and_b32 s5, s76, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s9, 8
; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
@@ -167619,9 +168241,11 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v30, s4
; GFX9-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen offset:60
; GFX9-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v8
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX9-NEXT: v_or_b32_sdwa v11, v29, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v6
+; GFX9-NEXT: v_or_b32_sdwa v6, v60, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5
; GFX9-NEXT: v_or_b32_sdwa v5, v58, v5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshlrev_b32_e32 v4, 8, v4
@@ -167669,54 +168293,52 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_readlane_b32 s31, v63, 1
; GFX9-NEXT: v_readlane_b32 s30, v63, 0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v7, v30, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:64
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v8
-; GFX9-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v8, v30, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v8, v8, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:64
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v9
+; GFX9-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v7, v8, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v28
+; GFX9-NEXT: v_or_b32_sdwa v8, v9, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v28
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v8, v11, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:68
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v13
-; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v10
-; GFX9-NEXT: v_or_b32_sdwa v7, v33, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v8, v27, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:72
+; GFX9-NEXT: v_or_b32_sdwa v9, v11, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:68
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v13
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v10
+; GFX9-NEXT: v_or_b32_sdwa v8, v33, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v9, v27, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:72
; GFX9-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v14
-; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v61
-; GFX9-NEXT: v_or_b32_sdwa v7, v31, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v14
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v61
+; GFX9-NEXT: v_or_b32_sdwa v8, v31, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v8, v10, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:76
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v15
-; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v9
-; GFX9-NEXT: v_or_b32_sdwa v7, v36, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v8, v60, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:80
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v16
+; GFX9-NEXT: v_or_b32_sdwa v9, v10, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v8, v8, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_store_dword v8, v0, s[0:3], 0 offen offset:76
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v15
+; GFX9-NEXT: v_or_b32_sdwa v8, v36, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v8, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:80
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v16
; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v59
-; GFX9-NEXT: v_or_b32_sdwa v7, v34, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v34, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v32, v8 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v7, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen offset:84
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v17
-; GFX9-NEXT: v_or_b32_sdwa v7, v39, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v5, v7, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v6, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_store_dword v6, v0, s[0:3], 0 offen offset:84
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v17
+; GFX9-NEXT: v_or_b32_sdwa v6, v39, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v5, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:88
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v18
-; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v57
+; GFX9-NEXT: v_lshlrev_b32_e32 v6, 8, v57
; GFX9-NEXT: v_or_b32_sdwa v5, v37, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v35, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v5, v5, v7 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v35, v6 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v5, v5, v6 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen offset:92
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v19
; GFX9-NEXT: v_or_b32_sdwa v5, v50, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -167753,7 +168375,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen offset:120
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v12
-; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v6
+; GFX9-NEXT: v_lshlrev_b32_e32 v2, 8, v7
; GFX9-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v2, v55, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -167798,28 +168420,28 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_writelane_b32 v41, s97, 1
; GFX11-NEXT: v_readfirstlane_b32 s62, v3
; GFX11-NEXT: v_readfirstlane_b32 s63, v4
-; GFX11-NEXT: v_readfirstlane_b32 s60, v5
+; GFX11-NEXT: v_readfirstlane_b32 s56, v5
; GFX11-NEXT: v_writelane_b32 v40, s34, 2
; GFX11-NEXT: v_writelane_b32 v41, s98, 2
-; GFX11-NEXT: v_readfirstlane_b32 s61, v6
-; GFX11-NEXT: v_readfirstlane_b32 s58, v7
-; GFX11-NEXT: v_readfirstlane_b32 s59, v8
+; GFX11-NEXT: v_readfirstlane_b32 s57, v6
+; GFX11-NEXT: v_readfirstlane_b32 s44, v7
+; GFX11-NEXT: v_readfirstlane_b32 s45, v8
; GFX11-NEXT: v_writelane_b32 v40, s35, 3
; GFX11-NEXT: v_writelane_b32 v41, s99, 3
-; GFX11-NEXT: v_readfirstlane_b32 s56, v9
-; GFX11-NEXT: v_readfirstlane_b32 s57, v10
-; GFX11-NEXT: v_readfirstlane_b32 s46, v11
+; GFX11-NEXT: v_readfirstlane_b32 s8, v9
+; GFX11-NEXT: v_readfirstlane_b32 s9, v10
+; GFX11-NEXT: v_readfirstlane_b32 s6, v11
; GFX11-NEXT: v_writelane_b32 v40, s36, 4
; GFX11-NEXT: v_writelane_b32 v41, s100, 4
-; GFX11-NEXT: v_readfirstlane_b32 s47, v12
-; GFX11-NEXT: v_readfirstlane_b32 s44, v13
-; GFX11-NEXT: v_readfirstlane_b32 s45, v14
+; GFX11-NEXT: v_readfirstlane_b32 s7, v12
+; GFX11-NEXT: v_readfirstlane_b32 s4, v13
+; GFX11-NEXT: v_readfirstlane_b32 s5, v14
; GFX11-NEXT: v_writelane_b32 v40, s37, 5
; GFX11-NEXT: v_writelane_b32 v41, s101, 5
-; GFX11-NEXT: s_mov_b32 vcc_hi, 0
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
+; GFX11-NEXT: s_and_b32 s10, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 vcc_lo, -1
; GFX11-NEXT: ; implicit-def: $vgpr42 : SGPR spill to VGPR lane
+; GFX11-NEXT: ; implicit-def: $vgpr43 : SGPR spill to VGPR lane
; GFX11-NEXT: v_writelane_b32 v40, s38, 6
; GFX11-NEXT: v_writelane_b32 v41, s102, 6
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
@@ -167851,273 +168473,273 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_writelane_b32 v40, s87, 31
; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s4, s27, 24
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[26:27], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 15
-; GFX11-NEXT: s_lshr_b32 s4, s27, 16
-; GFX11-NEXT: s_lshr_b32 s99, s2, 16
-; GFX11-NEXT: s_lshr_b32 s100, s2, 8
-; GFX11-NEXT: s_lshr_b32 s101, s1, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 14
-; GFX11-NEXT: s_lshr_b32 s4, s27, 8
-; GFX11-NEXT: s_lshr_b32 s11, s1, 16
-; GFX11-NEXT: s_lshr_b32 s102, s1, 8
-; GFX11-NEXT: s_lshr_b32 s103, s0, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 16
-; GFX11-NEXT: s_lshr_b32 s4, s26, 16
-; GFX11-NEXT: s_lshr_b32 s104, s0, 8
-; GFX11-NEXT: s_lshr_b32 s85, s45, 24
-; GFX11-NEXT: s_lshr_b32 s10, s45, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 17
-; GFX11-NEXT: s_lshr_b32 s4, s26, 8
-; GFX11-NEXT: s_lshr_b32 s5, s45, 8
-; GFX11-NEXT: s_lshr_b32 s87, s44, 16
-; GFX11-NEXT: s_lshr_b32 s86, s44, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 18
-; GFX11-NEXT: s_lshr_b32 s4, s25, 24
-; GFX11-NEXT: s_lshr_b32 s81, s47, 24
-; GFX11-NEXT: s_lshr_b32 s98, s47, 16
-; GFX11-NEXT: s_lshr_b32 s84, s47, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 19
-; GFX11-NEXT: s_lshr_b32 s4, s25, 16
-; GFX11-NEXT: s_lshr_b32 s48, s46, 8
-; GFX11-NEXT: s_lshr_b32 s70, s57, 24
-; GFX11-NEXT: s_lshr_b32 s97, s57, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 13
-; GFX11-NEXT: s_lshr_b32 s4, s25, 8
-; GFX11-NEXT: s_lshr_b32 s80, s57, 8
-; GFX11-NEXT: s_lshr_b32 s83, s56, 16
-; GFX11-NEXT: s_lshr_b32 s82, s56, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 20
-; GFX11-NEXT: s_lshr_b32 s4, s24, 16
-; GFX11-NEXT: s_lshr_b32 s66, s59, 24
-; GFX11-NEXT: s_lshr_b32 s9, s59, 16
-; GFX11-NEXT: s_lshr_b32 s69, s59, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 21
-; GFX11-NEXT: s_lshr_b32 s4, s24, 8
-; GFX11-NEXT: s_lshr_b32 s71, s58, 16
-; GFX11-NEXT: s_lshr_b32 s39, s58, 8
-; GFX11-NEXT: s_lshr_b32 s55, s61, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 22
-; GFX11-NEXT: s_lshr_b32 s4, s23, 24
-; GFX11-NEXT: s_lshr_b32 s8, s61, 16
-; GFX11-NEXT: s_lshr_b32 s65, s61, 8
-; GFX11-NEXT: s_lshr_b32 s68, s60, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 23
-; GFX11-NEXT: s_lshr_b32 s4, s23, 16
-; GFX11-NEXT: s_lshr_b32 s67, s60, 8
-; GFX11-NEXT: s_lshr_b32 s51, s63, 24
-; GFX11-NEXT: s_lshr_b32 s96, s63, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 12
-; GFX11-NEXT: s_lshr_b32 s4, s23, 8
-; GFX11-NEXT: s_lshr_b32 s54, s63, 8
-; GFX11-NEXT: s_lshr_b32 s38, s62, 16
-; GFX11-NEXT: s_lshr_b32 s64, s62, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 24
-; GFX11-NEXT: s_lshr_b32 s4, s22, 16
-; GFX11-NEXT: s_lshr_b32 s36, s73, 24
-; GFX11-NEXT: s_lshr_b32 s7, s73, 16
-; GFX11-NEXT: s_lshr_b32 s50, s73, 8
-; GFX11-NEXT: v_writelane_b32 v43, s4, 25
-; GFX11-NEXT: s_lshr_b32 s4, s22, 8
-; GFX11-NEXT: s_lshr_b32 s53, s72, 16
-; GFX11-NEXT: s_lshr_b32 s52, s72, 8
-; GFX11-NEXT: s_lshr_b32 s34, s29, 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 26
-; GFX11-NEXT: s_lshr_b32 s4, s21, 24
-; GFX11-NEXT: s_lshr_b32 s6, s29, 16
-; GFX11-NEXT: s_lshr_b32 s35, s29, 8
-; GFX11-NEXT: s_lshr_b32 s37, s28, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 27
-; GFX11-NEXT: s_lshr_b32 s4, s21, 16
-; GFX11-NEXT: s_lshr_b32 s49, s28, 8
-; GFX11-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
-; GFX11-NEXT: s_lshr_b64 s[40:41], s[2:3], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 11
-; GFX11-NEXT: s_lshr_b32 s4, s21, 8
-; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[44:45], 24
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[46:47], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 28
-; GFX11-NEXT: s_lshr_b32 s4, s20, 16
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[56:57], 24
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[58:59], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[60:61], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 29
-; GFX11-NEXT: s_lshr_b32 s4, s20, 8
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[62:63], 24
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[72:73], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
-; GFX11-NEXT: v_writelane_b32 v43, s4, 30
-; GFX11-NEXT: s_lshr_b32 s4, s19, 24
+; GFX11-NEXT: s_lshr_b32 s10, s27, 24
+; GFX11-NEXT: s_lshr_b32 s84, s5, 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 8
+; GFX11-NEXT: s_lshr_b32 s10, s27, 16
+; GFX11-NEXT: s_lshr_b32 s104, s5, 16
+; GFX11-NEXT: s_lshr_b32 s86, s5, 8
+; GFX11-NEXT: s_lshr_b32 s96, s4, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 7
+; GFX11-NEXT: s_lshr_b32 s10, s27, 8
+; GFX11-NEXT: s_lshr_b32 s87, s4, 8
+; GFX11-NEXT: s_lshr_b32 s80, s7, 24
+; GFX11-NEXT: s_lshr_b32 s103, s7, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 9
+; GFX11-NEXT: s_lshr_b32 s10, s26, 16
+; GFX11-NEXT: s_lshr_b32 s83, s7, 8
+; GFX11-NEXT: s_lshr_b32 s48, s6, 16
+; GFX11-NEXT: s_lshr_b32 s85, s6, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 10
+; GFX11-NEXT: s_lshr_b32 s10, s26, 8
+; GFX11-NEXT: s_lshr_b32 s69, s9, 24
+; GFX11-NEXT: s_lshr_b32 s102, s9, 16
+; GFX11-NEXT: s_lshr_b32 s39, s9, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 11
+; GFX11-NEXT: s_lshr_b32 s10, s25, 24
+; GFX11-NEXT: s_lshr_b32 s82, s8, 16
+; GFX11-NEXT: s_lshr_b32 s81, s8, 8
+; GFX11-NEXT: s_lshr_b32 s65, s45, 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 12
+; GFX11-NEXT: s_lshr_b32 s10, s25, 16
+; GFX11-NEXT: s_lshr_b32 s101, s45, 16
+; GFX11-NEXT: s_lshr_b32 s68, s45, 8
+; GFX11-NEXT: s_lshr_b32 s71, s44, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 6
+; GFX11-NEXT: s_lshr_b32 s10, s25, 8
+; GFX11-NEXT: s_lshr_b32 s70, s44, 8
+; GFX11-NEXT: s_lshr_b32 s54, s57, 24
+; GFX11-NEXT: s_lshr_b32 s100, s57, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 13
+; GFX11-NEXT: s_lshr_b32 s10, s24, 16
+; GFX11-NEXT: s_lshr_b32 s38, s57, 8
+; GFX11-NEXT: s_lshr_b32 s67, s56, 16
+; GFX11-NEXT: s_lshr_b32 s66, s56, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 14
+; GFX11-NEXT: s_lshr_b32 s10, s24, 8
+; GFX11-NEXT: s_lshr_b32 s50, s63, 24
+; GFX11-NEXT: s_lshr_b32 s99, s63, 16
+; GFX11-NEXT: s_lshr_b32 s53, s63, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 15
+; GFX11-NEXT: s_lshr_b32 s10, s23, 24
+; GFX11-NEXT: s_lshr_b32 s64, s62, 16
+; GFX11-NEXT: s_lshr_b32 s55, s62, 8
+; GFX11-NEXT: s_lshr_b32 s35, s73, 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 16
+; GFX11-NEXT: s_lshr_b32 s10, s23, 16
+; GFX11-NEXT: s_lshr_b32 s98, s73, 16
+; GFX11-NEXT: s_lshr_b32 s37, s73, 8
+; GFX11-NEXT: s_lshr_b32 s52, s72, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 5
+; GFX11-NEXT: s_lshr_b32 s10, s23, 8
+; GFX11-NEXT: s_lshr_b32 s51, s72, 8
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s29, 24
+; GFX11-NEXT: s_lshr_b32 s97, s29, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 17
+; GFX11-NEXT: s_lshr_b32 s10, s22, 16
+; GFX11-NEXT: s_lshr_b32 s34, s29, 8
+; GFX11-NEXT: s_lshr_b32 s49, s28, 16
+; GFX11-NEXT: s_lshr_b32 s36, s28, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 18
+; GFX11-NEXT: s_lshr_b32 s10, s22, 8
+; GFX11-NEXT: s_lshr_b64 s[14:15], s[26:27], 24
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[24:25], 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 19
+; GFX11-NEXT: s_lshr_b32 s10, s21, 24
+; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
+; GFX11-NEXT: s_lshr_b64 s[40:41], s[18:19], 24
+; GFX11-NEXT: s_lshr_b64 s[46:47], s[16:17], 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 20
+; GFX11-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-NEXT: s_lshr_b64 s[58:59], s[2:3], 24
+; GFX11-NEXT: s_lshr_b64 s[60:61], s[0:1], 24
+; GFX11-NEXT: s_lshr_b64 s[30:31], s[4:5], 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 4
+; GFX11-NEXT: s_lshr_b32 s10, s21, 8
+; GFX11-NEXT: s_lshr_b64 s[94:95], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[92:93], s[8:9], 24
+; GFX11-NEXT: s_lshr_b64 s[90:91], s[44:45], 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 21
+; GFX11-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-NEXT: s_lshr_b64 s[88:89], s[56:57], 24
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[62:63], 24
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[72:73], 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 22
+; GFX11-NEXT: s_lshr_b32 s10, s20, 8
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[28:29], 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 23
+; GFX11-NEXT: s_lshr_b32 s10, s19, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s4, 31
-; GFX11-NEXT: s_lshr_b32 s4, s19, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 10
-; GFX11-NEXT: s_lshr_b32 s4, s19, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 24
+; GFX11-NEXT: s_lshr_b32 s10, s19, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 3
+; GFX11-NEXT: s_lshr_b32 s10, s19, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 0
-; GFX11-NEXT: s_lshr_b32 s4, s18, 16
-; GFX11-NEXT: v_writelane_b32 v42, s4, 1
-; GFX11-NEXT: s_lshr_b32 s4, s18, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 25
+; GFX11-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 26
+; GFX11-NEXT: s_lshr_b32 s10, s18, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 2
-; GFX11-NEXT: s_lshr_b32 s4, s17, 24
-; GFX11-NEXT: v_writelane_b32 v42, s4, 3
-; GFX11-NEXT: s_lshr_b32 s4, s17, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 27
+; GFX11-NEXT: s_lshr_b32 s10, s17, 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 28
+; GFX11-NEXT: s_lshr_b32 s10, s17, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s4, 9
-; GFX11-NEXT: s_lshr_b32 s4, s17, 8
-; GFX11-NEXT: v_writelane_b32 v42, s4, 4
-; GFX11-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 2
+; GFX11-NEXT: s_lshr_b32 s10, s17, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 29
+; GFX11-NEXT: s_lshr_b32 s10, s16, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 5
-; GFX11-NEXT: s_lshr_b32 s4, s16, 8
-; GFX11-NEXT: v_writelane_b32 v42, s4, 6
-; GFX11-NEXT: s_lshr_b32 s4, s3, 24
+; GFX11-NEXT: v_writelane_b32 v42, s10, 30
+; GFX11-NEXT: s_lshr_b32 s10, s16, 8
+; GFX11-NEXT: v_writelane_b32 v42, s10, 31
+; GFX11-NEXT: s_lshr_b32 s10, s3, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 7
-; GFX11-NEXT: s_lshr_b32 s4, s3, 16
-; GFX11-NEXT: v_writelane_b32 v43, s4, 8
-; GFX11-NEXT: s_lshr_b32 s4, s3, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_4) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v42, s4, 8
-; GFX11-NEXT: s_lshr_b32 s4, s46, 16
-; GFX11-NEXT: v_writelane_b32 v43, s12, 6
-; GFX11-NEXT: v_writelane_b32 v43, s13, 7
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[24:25], 24
-; GFX11-NEXT: v_writelane_b32 v43, s12, 4
-; GFX11-NEXT: v_writelane_b32 v43, s13, 5
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[22:23], 24
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_2) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v43, s12, 2
-; GFX11-NEXT: v_writelane_b32 v43, s13, 3
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
-; GFX11-NEXT: v_writelane_b32 v43, s12, 0
-; GFX11-NEXT: v_writelane_b32 v43, s13, 1
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_hi
-; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX11-NEXT: v_writelane_b32 v43, s10, 0
+; GFX11-NEXT: s_lshr_b32 s10, s3, 16
+; GFX11-NEXT: v_writelane_b32 v42, s10, 1
+; GFX11-NEXT: s_lshr_b32 s10, s3, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v43, s10, 1
+; GFX11-NEXT: s_lshr_b32 s10, s2, 16
+; GFX11-NEXT: v_writelane_b32 v43, s10, 2
+; GFX11-NEXT: s_lshr_b32 s10, s2, 8
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v43, s10, 3
+; GFX11-NEXT: s_lshr_b32 s10, s1, 24
+; GFX11-NEXT: v_writelane_b32 v43, s10, 4
+; GFX11-NEXT: s_lshr_b32 s10, s1, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v42, s10, 0
+; GFX11-NEXT: s_lshr_b32 s10, s1, 8
+; GFX11-NEXT: v_writelane_b32 v43, s10, 5
+; GFX11-NEXT: s_lshr_b32 s10, s0, 16
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v43, s10, 6
+; GFX11-NEXT: s_lshr_b32 s10, s0, 8
+; GFX11-NEXT: v_writelane_b32 v43, s10, 7
+; GFX11-NEXT: s_lshr_b64 s[10:11], s[22:23], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB91_4
; GFX11-NEXT: .LBB91_2: ; %cmp.true
-; GFX11-NEXT: s_and_b32 s4, s29, 0xffff0000
-; GFX11-NEXT: s_and_b32 s14, s47, 0xffff0000
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
-; GFX11-NEXT: s_and_b32 s4, s1, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s15, s47, 16
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s4
-; GFX11-NEXT: s_lshl_b32 s6, s29, 16
+; GFX11-NEXT: s_and_b32 s10, s29, 0xffff0000
+; GFX11-NEXT: s_and_b32 s13, s9, 0xffff0000
+; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s10
+; GFX11-NEXT: s_lshl_b32 s10, s9, 16
+; GFX11-NEXT: s_and_b32 s9, s1, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s11, s29, 16
+; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s9
+; GFX11-NEXT: s_and_b32 s76, s28, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s77, s28, 16
; GFX11-NEXT: v_bfe_u32 v3, v1, 16, 1
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s6
-; GFX11-NEXT: s_and_b32 s8, s45, 0xffff0000
-; GFX11-NEXT: v_readfirstlane_b32 s47, v6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-NEXT: s_and_b32 s28, s44, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s29, s44, 16
+; GFX11-NEXT: v_readfirstlane_b32 s44, v6
+; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s11
; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v1
-; GFX11-NEXT: s_lshl_b32 s7, s45, 16
-; GFX11-NEXT: s_and_b32 s78, s28, 0xffff0000
-; GFX11-NEXT: s_bfe_u32 s6, s47, 0x10010
-; GFX11-NEXT: s_lshl_b32 s79, s28, 16
-; GFX11-NEXT: s_add_i32 s45, s6, s47
-; GFX11-NEXT: s_and_b32 s5, s73, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s77, s73, 16
-; GFX11-NEXT: s_and_b32 s75, s72, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s76, s72, 16
+; GFX11-NEXT: s_and_b32 s12, s6, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s9, s6, 16
+; GFX11-NEXT: s_bfe_u32 s6, s44, 0x10010
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
+; GFX11-NEXT: s_and_b32 s41, s45, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s40, s45, 16
+; GFX11-NEXT: s_add_i32 s45, s6, s44
+; GFX11-NEXT: s_and_b32 s74, s73, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s75, s73, 16
+; GFX11-NEXT: s_and_b32 s73, s72, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s72, s72, 16
; GFX11-NEXT: s_and_b32 s11, s63, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s74, s63, 16
-; GFX11-NEXT: s_and_b32 s72, s62, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s73, s62, 16
-; GFX11-NEXT: s_and_b32 s63, s61, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s62, s61, 16
-; GFX11-NEXT: s_and_b32 s61, s60, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s60, s60, 16
-; GFX11-NEXT: s_and_b32 s41, s59, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s40, s59, 16
-; GFX11-NEXT: s_and_b32 s28, s58, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s29, s58, 16
-; GFX11-NEXT: s_and_b32 s13, s57, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s10, s57, 16
-; GFX11-NEXT: s_and_b32 s42, s56, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s43, s56, 16
-; GFX11-NEXT: s_and_b32 s12, s46, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s9, s46, 16
-; GFX11-NEXT: s_and_b32 s4, s44, 0xffff0000
-; GFX11-NEXT: s_lshl_b32 s6, s44, 16
+; GFX11-NEXT: s_lshl_b32 s61, s63, 16
+; GFX11-NEXT: s_and_b32 s59, s62, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s60, s62, 16
+; GFX11-NEXT: s_and_b32 s58, s57, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s57, s57, 16
+; GFX11-NEXT: s_and_b32 s46, s56, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s47, s56, 16
+; GFX11-NEXT: s_and_b32 s42, s8, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s43, s8, 16
+; GFX11-NEXT: s_and_b32 s14, s7, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s15, s7, 16
+; GFX11-NEXT: s_and_b32 s8, s5, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s7, s5, 16
+; GFX11-NEXT: s_and_b32 s5, s4, 0xffff0000
+; GFX11-NEXT: s_lshl_b32 s6, s4, 16
; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_bitset1_b32 s47, 22
+; GFX11-NEXT: s_bitset1_b32 s44, 22
; GFX11-NEXT: v_bfe_u32 v4, v2, 16, 1
; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v1
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
-; GFX11-NEXT: s_cselect_b32 s44, s47, s45
+; GFX11-NEXT: s_cselect_b32 s4, s44, s45
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v2
-; GFX11-NEXT: s_lshr_b32 s58, s44, 16
+; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s76
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s1
; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-NEXT: v_add_nc_u32_e32 v4, 0x7fff, v4
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
-; GFX11-NEXT: v_add_f32_e64 v6, 0x40c00000, s78
+; GFX11-NEXT: s_lshr_b32 s76, s4, 16
; GFX11-NEXT: v_readfirstlane_b32 s1, v3
-; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s79
-; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
+; GFX11-NEXT: v_add_f32_e64 v7, 0x40c00000, s77
+; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v1
; GFX11-NEXT: v_cndmask_b32_e32 v2, v4, v5, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: s_bfe_u32 s45, s1, 0x10010
+; GFX11-NEXT: s_bfe_u32 s44, s1, 0x10010
; GFX11-NEXT: v_bfe_u32 v4, v6, 16, 1
-; GFX11-NEXT: s_add_i32 s45, s45, s1
+; GFX11-NEXT: s_add_i32 s44, s44, s1
; GFX11-NEXT: s_bitset1_b32 s1, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s1, s1, s45
-; GFX11-NEXT: s_and_b32 s44, s0, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v2
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s44
+; GFX11-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_cselect_b32 s1, s1, s44
+; GFX11-NEXT: s_and_b32 s4, s0, 0xffff0000
+; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v2
+; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s4
; GFX11-NEXT: v_bfe_u32 v5, v7, 16, 1
; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v6
; GFX11-NEXT: s_lshr_b32 s1, s1, 16
; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v7
-; GFX11-NEXT: v_readfirstlane_b32 s44, v2
+; GFX11-NEXT: v_readfirstlane_b32 s4, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
+; GFX11-NEXT: s_bfe_u32 s44, s4, 0x10010
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_add_i32 s44, s44, s4
+; GFX11-NEXT: s_bitset1_b32 s4, 22
+; GFX11-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-NEXT: s_and_b32 s45, vcc_lo, exec_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v6, v6
; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v7
; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v6
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-NEXT: s_cselect_b32 s4, s4, s44
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
+; GFX11-NEXT: s_lshr_b32 s4, s4, 16
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s0
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v7, v7
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v21
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s5
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v22
+; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s74
; GFX11-NEXT: v_readfirstlane_b32 s0, v3
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s77
-; GFX11-NEXT: s_bfe_u32 s5, s0, 0x10010
-; GFX11-NEXT: v_lshl_or_b32 v7, v22, 16, v4
-; GFX11-NEXT: s_add_i32 s45, s5, s0
-; GFX11-NEXT: s_lshr_b32 s5, s44, 16
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s75
+; GFX11-NEXT: s_bfe_u32 s44, s0, 0x10010
+; GFX11-NEXT: v_lshl_or_b32 v7, v23, 16, v4
+; GFX11-NEXT: s_add_i32 s44, s44, s0
; GFX11-NEXT: s_bitset1_b32 s0, 22
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s0, s0, s45
+; GFX11-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-NEXT: s_and_b32 s45, vcc_lo, exec_lo
+; GFX11-NEXT: s_cselect_b32 s0, s0, s44
; GFX11-NEXT: s_and_b32 s44, s3, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v1
; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s44
; GFX11-NEXT: v_bfe_u32 v6, v8, 16, 1
; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
; GFX11-NEXT: s_lshr_b32 s0, s0, 16
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v23
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v24
; GFX11-NEXT: v_readfirstlane_b32 s44, v9
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
; GFX11-NEXT: v_add_nc_u32_e32 v4, v6, v8
@@ -168128,7 +168750,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_add_i32 s45, s45, s44
; GFX11-NEXT: s_bitset1_b32 s44, 22
; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s56, vcc_lo, exec_lo
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
; GFX11-NEXT: s_cselect_b32 s44, s44, s45
@@ -168136,15 +168758,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s3
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s76
-; GFX11-NEXT: s_lshr_b32 s59, s44, 16
+; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s72
+; GFX11-NEXT: s_lshr_b32 s74, s44, 16
; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s75
+; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s73
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-NEXT: v_readfirstlane_b32 s3, v10
; GFX11-NEXT: v_bfe_u32 v8, v9, 16, 1
+; GFX11-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-NEXT: v_lshrrev_b32_e32 v87, 24, v7
-; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v6
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v2, vcc_lo
; GFX11-NEXT: v_bfe_u32 v2, v4, 16, 1
; GFX11-NEXT: s_bfe_u32 s45, s3, 0x10010
@@ -168156,10 +168778,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s3, s3, s45
; GFX11-NEXT: s_and_b32 s44, s2, 0xffff0000
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v1
; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s44
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v3
; GFX11-NEXT: v_add_nc_u32_e32 v3, v8, v9
; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v9
; GFX11-NEXT: v_readfirstlane_b32 s44, v1
@@ -168168,26 +168790,26 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v3
; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v24
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v25
; GFX11-NEXT: s_add_i32 s45, s45, s44
; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v8, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: s_addk_i32 s45, 0x7fff
; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s74
-; GFX11-NEXT: v_lshl_or_b32 v14, v25, 16, v5
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s61
+; GFX11-NEXT: v_lshl_or_b32 v13, v26, 16, v5
+; GFX11-NEXT: s_and_b32 s56, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s44, s44, s45
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s2
; GFX11-NEXT: v_bfe_u32 v4, v1, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v85, 24, v14
+; GFX11-NEXT: v_lshrrev_b32_e32 v85, 24, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v96, 16, v6
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_readfirstlane_b32 s2, v8
; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v3
; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s11
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: s_bfe_u32 s11, s2, 0x10010
@@ -168199,18 +168821,18 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s2, s2, s45
; GFX11-NEXT: s_and_b32 s44, s17, 0xffff0000
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v26
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v27
; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s44
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_bfe_u32 v10, v3, 16, 1
; GFX11-NEXT: s_lshr_b32 s2, s2, 16
-; GFX11-NEXT: v_lshl_or_b32 v13, v2, 16, v9
+; GFX11-NEXT: v_lshl_or_b32 v12, v2, 16, v9
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v1
; GFX11-NEXT: v_readfirstlane_b32 s44, v5
; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v3
; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v86, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v86, 16, v12
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v4, vcc_lo
; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
@@ -168218,28 +168840,28 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_bitset1_b32 s44, 22
; GFX11-NEXT: s_addk_i32 s45, 0x7fff
; GFX11-NEXT: v_add_nc_u32_e32 v8, 0x7fff, v8
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s56, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s44, s44, s45
; GFX11-NEXT: s_lshl_b32 s17, s17, 16
-; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s73
+; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s60
; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s17
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v1
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s72
+; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
+; GFX11-NEXT: s_lshr_b32 s75, s44, 16
; GFX11-NEXT: v_bfe_u32 v5, v2, 16, 1
; GFX11-NEXT: v_readfirstlane_b32 s17, v4
; GFX11-NEXT: v_cndmask_b32_e32 v3, v8, v9, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_lshr_b32 s72, s44, 16
+; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s59
; GFX11-NEXT: v_add_nc_u32_e32 v5, v5, v2
; GFX11-NEXT: s_bfe_u32 s45, s17, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v27
+; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v3
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v28
; GFX11-NEXT: s_add_i32 s45, s45, s17
; GFX11-NEXT: s_bitset1_b32 s17, 22
; GFX11-NEXT: s_addk_i32 s45, 0x7fff
; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: v_lshl_or_b32 v16, v28, 16, v3
+; GFX11-NEXT: v_lshl_or_b32 v17, v29, 16, v3
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v5
; GFX11-NEXT: v_or_b32_e32 v5, 0x400000, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
@@ -168248,15 +168870,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_and_b32 s44, s16, 0xffff0000
; GFX11-NEXT: s_lshr_b32 s17, s17, 16
; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s63
+; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s58
; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v83, 24, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v83, 24, v17
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v2
; GFX11-NEXT: v_bfe_u32 v5, v3, 16, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_add_nc_u32_e32 v2, v5, v3
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v29
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v30
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_2) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
; GFX11-NEXT: v_add_nc_u32_e32 v4, v8, v1
@@ -168269,69 +168891,69 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_add_i32 s45, s45, s44
; GFX11-NEXT: s_bitset1_b32 s44, 22
; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s56, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s44, s44, s45
; GFX11-NEXT: s_lshl_b32 s16, s16, 16
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s16
-; GFX11-NEXT: s_lshr_b32 s46, s44, 16
+; GFX11-NEXT: s_lshr_b32 s44, s44, 16
; GFX11-NEXT: v_cndmask_b32_e32 v1, v4, v9, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s16, v8
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: v_or_b32_e32 v8, 0x400000, v3
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s62
+; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s57
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX11-NEXT: s_bfe_u32 s45, s16, 0x10010
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_add_i32 s45, s45, s16
; GFX11-NEXT: s_bitset1_b32 s16, 22
; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s56, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s16, s16, s45
-; GFX11-NEXT: s_and_b32 s44, s19, 0xffff0000
+; GFX11-NEXT: s_and_b32 s45, s19, 0xffff0000
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s44
-; GFX11-NEXT: v_lshl_or_b32 v15, v1, 16, v5
+; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s45
+; GFX11-NEXT: v_lshl_or_b32 v16, v1, 16, v5
+; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s46
; GFX11-NEXT: v_bfe_u32 v9, v4, 16, 1
-; GFX11-NEXT: s_lshr_b32 s16, s16, 16
; GFX11-NEXT: v_cndmask_b32_e32 v1, v2, v8, vcc_lo
-; GFX11-NEXT: v_readfirstlane_b32 s44, v10
+; GFX11-NEXT: v_readfirstlane_b32 s45, v10
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
+; GFX11-NEXT: s_lshr_b32 s16, s16, 16
+; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s47
; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v4
-; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s60
+; GFX11-NEXT: s_bfe_u32 s46, s45, 0x10010
; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v4
-; GFX11-NEXT: s_bfe_u32 s45, s44, 0x10010
-; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s61
-; GFX11-NEXT: s_add_i32 s45, s45, s44
-; GFX11-NEXT: s_bitset1_b32 s44, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
+; GFX11-NEXT: s_add_i32 s46, s46, s45
+; GFX11-NEXT: s_bitset1_b32 s45, 22
+; GFX11-NEXT: s_addk_i32 s46, 0x7fff
; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s44, s44, s45
+; GFX11-NEXT: s_cselect_b32 s45, s45, s46
; GFX11-NEXT: s_lshl_b32 s19, s19, 16
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v9
; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s19
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
; GFX11-NEXT: v_bfe_u32 v9, v8, 16, 1
-; GFX11-NEXT: s_lshr_b32 s60, s44, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v1
+; GFX11-NEXT: s_lshr_b32 s77, s45, 16
+; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v1
; GFX11-NEXT: v_readfirstlane_b32 s19, v10
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
; GFX11-NEXT: v_bfe_u32 v3, v5, 16, 1
; GFX11-NEXT: v_add_nc_u32_e32 v4, v9, v8
-; GFX11-NEXT: s_bfe_u32 s45, s19, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v2
-; GFX11-NEXT: s_add_i32 s45, s45, s19
+; GFX11-NEXT: s_bfe_u32 s46, s19, 0x10010
+; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-NEXT: s_add_i32 s46, s46, s19
; GFX11-NEXT: s_bitset1_b32 s19, 22
-; GFX11-NEXT: s_addk_i32 s45, 0x7fff
-; GFX11-NEXT: s_and_b32 s44, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s19, s19, s45
-; GFX11-NEXT: s_and_b32 s44, s18, 0xffff0000
+; GFX11-NEXT: s_addk_i32 s46, 0x7fff
+; GFX11-NEXT: s_and_b32 s45, vcc_lo, exec_lo
+; GFX11-NEXT: s_cselect_b32 s19, s19, s46
+; GFX11-NEXT: s_and_b32 s45, s18, 0xffff0000
; GFX11-NEXT: v_add_nc_u32_e32 v1, v3, v5
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v4
; GFX11-NEXT: v_or_b32_e32 v3, 0x400000, v8
-; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s44
+; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s45
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
; GFX11-NEXT: v_or_b32_e32 v9, 0x400000, v5
@@ -168341,38 +168963,38 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s41
; GFX11-NEXT: v_readfirstlane_b32 s41, v4
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: s_pack_ll_b32_b16 s47, s17, s72
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX11-NEXT: v_bfe_u32 v2, v3, 16, 1
-; GFX11-NEXT: s_bfe_u32 s44, s41, 0x10010
+; GFX11-NEXT: s_bfe_u32 s45, s41, 0x10010
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v4, v4
-; GFX11-NEXT: s_add_i32 s44, s44, s41
+; GFX11-NEXT: s_add_i32 s45, s45, s41
; GFX11-NEXT: s_bitset1_b32 s41, 22
-; GFX11-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-NEXT: s_addk_i32 s45, 0x7fff
; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s40
-; GFX11-NEXT: s_and_b32 s45, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s41, s41, s44
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cselect_b32 s41, s41, s45
; GFX11-NEXT: s_lshl_b32 s18, s18, 16
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v31
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v32
; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s18
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v32
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v33
; GFX11-NEXT: v_add_nc_u32_e32 v2, v2, v3
-; GFX11-NEXT: v_lshl_or_b32 v18, v30, 16, v4
+; GFX11-NEXT: v_lshl_or_b32 v19, v31, 16, v4
; GFX11-NEXT: v_readfirstlane_b32 s18, v5
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
-; GFX11-NEXT: v_lshl_or_b32 v17, v1, 16, v8
+; GFX11-NEXT: v_lshl_or_b32 v18, v1, 16, v8
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v2
; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v3
; GFX11-NEXT: s_bfe_u32 s40, s18, 0x10010
; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s28
-; GFX11-NEXT: s_add_i32 s44, s40, s18
+; GFX11-NEXT: s_add_i32 s45, s40, s18
; GFX11-NEXT: s_lshr_b32 s40, s41, 16
-; GFX11-NEXT: s_addk_i32 s44, 0x7fff
+; GFX11-NEXT: s_addk_i32 s45, 0x7fff
; GFX11-NEXT: s_bitset1_b32 s18, 22
; GFX11-NEXT: s_and_b32 s41, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s18, s18, s44
+; GFX11-NEXT: s_cselect_b32 s18, s18, s45
; GFX11-NEXT: s_and_b32 s41, s21, 0xffff0000
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s41
@@ -168386,7 +169008,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
; GFX11-NEXT: v_bfe_u32 v5, v10, 16, 1
; GFX11-NEXT: s_bfe_u32 s29, s28, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-NEXT: s_add_i32 s29, s29, s28
; GFX11-NEXT: s_bitset1_b32 s28, 22
; GFX11-NEXT: s_addk_i32 s29, 0x7fff
@@ -168396,16 +169018,16 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
; GFX11-NEXT: v_add_f32_e64 v11, 0x40c00000, s21
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: s_lshr_b32 s61, s28, 16
+; GFX11-NEXT: s_lshr_b32 s78, s28, 16
; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s44, s2, s11
+; GFX11-NEXT: v_lshrrev_b32_e32 v81, 24, v19
; GFX11-NEXT: v_readfirstlane_b32 s21, v11
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v11, v11
; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v8
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
; GFX11-NEXT: s_bfe_u32 s29, s21, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX11-NEXT: s_add_i32 s29, s29, s21
; GFX11-NEXT: s_bitset1_b32 s21, 22
; GFX11-NEXT: s_addk_i32 s29, 0x7fff
@@ -168418,35 +169040,35 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v10
; GFX11-NEXT: s_lshr_b32 s21, s21, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s45, s3, s59
-; GFX11-NEXT: s_pack_ll_b32_b16 s46, s16, s46
+; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v18
+; GFX11-NEXT: s_pack_ll_b32_b16 s45, s21, s78
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s13
; GFX11-NEXT: v_readfirstlane_b32 s13, v5
-; GFX11-NEXT: v_lshrrev_b32_e32 v81, 24, v18
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v4, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-NEXT: s_bfe_u32 s28, s13, 0x10010
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v34
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v35
; GFX11-NEXT: s_add_i32 s28, s28, s13
; GFX11-NEXT: s_bitset1_b32 s13, 22
; GFX11-NEXT: s_addk_i32 s28, 0x7fff
; GFX11-NEXT: s_and_b32 s29, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s13, s13, s28
; GFX11-NEXT: s_lshl_b32 s20, s20, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s20
; GFX11-NEXT: v_bfe_u32 v1, v3, 16, 1
; GFX11-NEXT: v_add_f32_e64 v5, 0x40c00000, s10
-; GFX11-NEXT: v_lshl_or_b32 v20, v33, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v35
+; GFX11-NEXT: v_lshl_or_b32 v21, v34, 16, v4
+; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v36
; GFX11-NEXT: v_readfirstlane_b32 s20, v8
; GFX11-NEXT: v_add_nc_u32_e32 v1, v1, v3
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: v_bfe_u32 v4, v5, 16, 1
-; GFX11-NEXT: v_lshl_or_b32 v19, v2, 16, v9
+; GFX11-NEXT: v_lshl_or_b32 v20, v2, 16, v9
; GFX11-NEXT: s_bfe_u32 s10, s20, 0x10010
; GFX11-NEXT: v_add_nc_u32_e32 v1, 0x7fff, v1
; GFX11-NEXT: s_add_i32 s28, s10, s20
@@ -168467,8 +169089,8 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v5
; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s43
; GFX11-NEXT: v_readfirstlane_b32 s28, v2
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v1
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v19
+; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v20
; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-NEXT: s_bfe_u32 s20, s28, 0x10010
@@ -168482,9 +169104,9 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_lshl_b32 s23, s23, 16
; GFX11-NEXT: v_bfe_u32 v5, v9, 16, 1
; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s23
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v3
; GFX11-NEXT: v_add_nc_u32_e32 v3, v4, v8
-; GFX11-NEXT: s_lshr_b32 s62, s13, 16
+; GFX11-NEXT: s_lshr_b32 s79, s13, 16
; GFX11-NEXT: v_add_nc_u32_e32 v1, v5, v9
; GFX11-NEXT: v_readfirstlane_b32 s23, v2
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
@@ -168501,7 +169123,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_cselect_b32 s13, s23, s28
; GFX11-NEXT: s_and_b32 s23, s22, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s15
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v36
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v37
; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v5, vcc_lo
; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s23
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
@@ -168511,10 +169133,10 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_readfirstlane_b32 s14, v3
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v10, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
-; GFX11-NEXT: v_lshl_or_b32 v71, v37, 16, v4
+; GFX11-NEXT: v_lshl_or_b32 v71, v38, 16, v4
; GFX11-NEXT: v_add_f32_e64 v4, 0x40c00000, s12
; GFX11-NEXT: s_bfe_u32 s15, s14, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v1
; GFX11-NEXT: s_add_i32 s15, s15, s14
; GFX11-NEXT: s_bitset1_b32 s14, 22
; GFX11-NEXT: s_addk_i32 s15, 0x7fff
@@ -168524,7 +169146,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s14
; GFX11-NEXT: v_bfe_u32 v1, v5, 16, 1
-; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v38
+; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v39
; GFX11-NEXT: v_add_nc_u32_e32 v9, v9, v8
; GFX11-NEXT: s_lshr_b32 s13, s13, 16
; GFX11-NEXT: v_readfirstlane_b32 s14, v10
@@ -168550,21 +169172,21 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v5, v5
; GFX11-NEXT: s_lshr_b32 s22, s12, 16
; GFX11-NEXT: v_bfe_u32 v3, v4, 16, 1
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v2
+; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v2
; GFX11-NEXT: s_bfe_u32 s14, s9, 0x10010
; GFX11-NEXT: v_cndmask_b32_e32 v1, v1, v9, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
; GFX11-NEXT: s_add_i32 s14, s14, s9
; GFX11-NEXT: s_bitset1_b32 s9, 22
; GFX11-NEXT: s_addk_i32 s14, 0x7fff
-; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v1
+; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v1
; GFX11-NEXT: s_and_b32 s12, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s9, s9, s14
; GFX11-NEXT: s_lshl_b32 s12, s25, 16
; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s8
; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s12
; GFX11-NEXT: v_add_nc_u32_e32 v2, v3, v4
-; GFX11-NEXT: s_lshr_b32 s63, s9, 16
+; GFX11-NEXT: s_lshr_b32 s88, s9, 16
; GFX11-NEXT: v_bfe_u32 v3, v8, 16, 1
; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v4
; GFX11-NEXT: v_readfirstlane_b32 s8, v1
@@ -168573,7 +169195,7 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_add_nc_u32_e32 v3, v3, v8
; GFX11-NEXT: v_or_b32_e32 v11, 0x400000, v8
; GFX11-NEXT: s_bfe_u32 s12, s8, 0x10010
-; GFX11-NEXT: v_bfe_u32 v12, v9, 16, 1
+; GFX11-NEXT: v_bfe_u32 v14, v9, 16, 1
; GFX11-NEXT: s_add_i32 s12, s12, s8
; GFX11-NEXT: s_bitset1_b32 s8, 22
; GFX11-NEXT: s_addk_i32 s12, 0x7fff
@@ -168587,15 +169209,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_add_f32_e64 v2, 0x40c00000, s9
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s7
-; GFX11-NEXT: v_add_nc_u32_e32 v4, v12, v9
-; GFX11-NEXT: v_add_f32_e64 v12, 0x40c00000, s6
+; GFX11-NEXT: v_add_nc_u32_e32 v4, v14, v9
+; GFX11-NEXT: v_add_f32_e64 v14, 0x40c00000, s6
; GFX11-NEXT: v_readfirstlane_b32 s7, v2
; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v11, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v2, v2
; GFX11-NEXT: v_or_b32_e32 v2, 0x400000, v9
-; GFX11-NEXT: s_pack_ll_b32_b16 s28, s0, s5
+; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX11-NEXT: s_bfe_u32 s9, s7, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v3
; GFX11-NEXT: s_add_i32 s9, s9, s7
; GFX11-NEXT: s_bitset1_b32 s7, 22
; GFX11-NEXT: s_addk_i32 s9, 0x7fff
@@ -168605,110 +169227,117 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
; GFX11-NEXT: v_add_f32_e64 v10, 0x40c00000, s8
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s4
+; GFX11-NEXT: v_add_f32_e64 v9, 0x40c00000, s5
; GFX11-NEXT: v_bfe_u32 v4, v8, 16, 1
; GFX11-NEXT: s_lshr_b32 s12, s7, 16
; GFX11-NEXT: v_readfirstlane_b32 s8, v10
; GFX11-NEXT: v_cndmask_b32_e32 v2, v3, v2, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v10, v10
; GFX11-NEXT: v_add_nc_u32_e32 v4, v4, v8
-; GFX11-NEXT: v_bfe_u32 v10, v12, 16, 1
-; GFX11-NEXT: s_bfe_u32 s4, s8, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v2
-; GFX11-NEXT: s_add_i32 s4, s4, s8
+; GFX11-NEXT: v_bfe_u32 v10, v14, 16, 1
+; GFX11-NEXT: s_bfe_u32 s5, s8, 0x10010
+; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v2
+; GFX11-NEXT: s_add_i32 s5, s5, s8
; GFX11-NEXT: s_bitset1_b32 s8, 22
-; GFX11-NEXT: s_addk_i32 s4, 0x7fff
+; GFX11-NEXT: s_addk_i32 s5, 0x7fff
; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s8, s4
+; GFX11-NEXT: s_cselect_b32 s5, s8, s5
; GFX11-NEXT: s_and_b32 s6, s27, 0xffff0000
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v4
-; GFX11-NEXT: v_add_f32_e64 v52, 0x40c00000, s6
+; GFX11-NEXT: v_add_f32_e64 v15, 0x40c00000, s6
; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v8
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
-; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v12
-; GFX11-NEXT: s_lshr_b32 s24, s4, 16
-; GFX11-NEXT: v_readfirstlane_b32 s6, v52
+; GFX11-NEXT: v_add_nc_u32_e32 v8, v10, v14
+; GFX11-NEXT: s_lshr_b32 s24, s5, 16
+; GFX11-NEXT: v_readfirstlane_b32 s6, v15
; GFX11-NEXT: v_or_b32_e32 v10, 0x400000, v9
; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v52, v52
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v15, v15
; GFX11-NEXT: v_bfe_u32 v4, v9, 16, 1
; GFX11-NEXT: s_bfe_u32 s7, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v50
; GFX11-NEXT: s_add_i32 s7, s7, s6
; GFX11-NEXT: s_bitset1_b32 s6, 22
; GFX11-NEXT: s_addk_i32 s7, 0x7fff
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s6, s7
+; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_cselect_b32 s5, s6, s7
; GFX11-NEXT: s_lshl_b32 s6, s27, 16
-; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
; GFX11-NEXT: v_add_nc_u32_e32 v2, v4, v9
; GFX11-NEXT: v_add_nc_u32_e32 v3, 0x7fff, v8
-; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v12
+; GFX11-NEXT: v_or_b32_e32 v4, 0x400000, v14
; GFX11-NEXT: v_add_f32_e64 v8, 0x40c00000, s6
-; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v12, v12
+; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v14, v14
; GFX11-NEXT: v_add_nc_u32_e32 v2, 0x7fff, v2
-; GFX11-NEXT: s_lshr_b32 s73, s4, 16
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v49
+; GFX11-NEXT: s_lshr_b32 s89, s5, 16
+; GFX11-NEXT: v_lshl_or_b32 v66, v1, 16, v11
; GFX11-NEXT: v_readfirstlane_b32 s6, v8
; GFX11-NEXT: v_cndmask_b32_e32 v3, v3, v4, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v9, v9
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v51
-; GFX11-NEXT: v_lshl_or_b32 v66, v1, 16, v11
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v52
+; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v48
; GFX11-NEXT: s_bfe_u32 s7, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v3
+; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v3
; GFX11-NEXT: v_cndmask_b32_e32 v2, v2, v10, vcc_lo
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v8, v8
; GFX11-NEXT: s_add_i32 s7, s7, s6
; GFX11-NEXT: s_bitset1_b32 s6, 22
; GFX11-NEXT: s_addk_i32 s7, 0x7fff
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s4, s6, s7
-; GFX11-NEXT: s_and_b32 s6, s26, 0xffff0000
-; GFX11-NEXT: s_lshr_b32 s27, s4, 16
-; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s6
-; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v52
-; GFX11-NEXT: v_and_b32_e32 v5, 0xffff, v39
-; GFX11-NEXT: v_lshl_or_b32 v55, v50, 16, v4
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s22, s13
-; GFX11-NEXT: v_readfirstlane_b32 s6, v3
+; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_cselect_b32 s6, s6, s7
+; GFX11-NEXT: s_and_b32 s5, s26, 0xffff0000
+; GFX11-NEXT: s_lshr_b32 s27, s6, 16
+; GFX11-NEXT: v_add_f32_e64 v3, 0x40c00000, s5
+; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v53
+; GFX11-NEXT: v_lshl_or_b32 v55, v51, 16, v4
+; GFX11-NEXT: v_lshl_or_b32 v67, v49, 16, v5
+; GFX11-NEXT: s_pack_ll_b32_b16 s56, s22, s13
+; GFX11-NEXT: v_readfirstlane_b32 s8, v3
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v3, v3
; GFX11-NEXT: v_lshl_or_b32 v54, v2, 16, v8
-; GFX11-NEXT: v_lshl_or_b32 v67, v48, 16, v5
-; GFX11-NEXT: v_lshrrev_b64 v[8:9], 24, v[17:18]
-; GFX11-NEXT: s_bfe_u32 s5, s6, 0x10010
-; GFX11-NEXT: v_lshrrev_b64 v[9:10], 24, v[15:16]
-; GFX11-NEXT: s_add_i32 s5, s5, s6
-; GFX11-NEXT: s_bitset1_b32 s6, 22
-; GFX11-NEXT: s_addk_i32 s5, 0x7fff
-; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
-; GFX11-NEXT: s_cselect_b32 s14, s6, s5
-; GFX11-NEXT: s_lshl_b32 s4, s26, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s20, s10
-; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s4
+; GFX11-NEXT: v_lshrrev_b64 v[8:9], 24, v[18:19]
+; GFX11-NEXT: v_lshrrev_b64 v[9:10], 24, v[16:17]
+; GFX11-NEXT: s_bfe_u32 s9, s8, 0x10010
+; GFX11-NEXT: v_lshrrev_b64 v[10:11], 24, v[12:13]
+; GFX11-NEXT: s_add_i32 s9, s9, s8
+; GFX11-NEXT: s_bitset1_b32 s8, 22
+; GFX11-NEXT: s_addk_i32 s9, 0x7fff
+; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
+; GFX11-NEXT: s_cselect_b32 s14, s8, s9
+; GFX11-NEXT: s_lshl_b32 s8, s26, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s2, s11
+; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s8, s16, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s44, s20, s10
; GFX11-NEXT: s_lshr_b32 s13, s14, 16
-; GFX11-NEXT: v_lshrrev_b64 v[10:11], 24, v[13:14]
-; GFX11-NEXT: v_lshrrev_b64 v[11:12], 24, v[6:7]
-; GFX11-NEXT: s_pack_ll_b32_b16 s29, s1, s58
+; GFX11-NEXT: v_lshrrev_b64 v[14:15], 24, v[6:7]
; GFX11-NEXT: v_readfirstlane_b32 s11, v1
; GFX11-NEXT: v_cmp_u_f32_e32 vcc_lo, v1, v1
; GFX11-NEXT: v_lshrrev_b64 v[1:2], 24, v[54:55]
; GFX11-NEXT: v_lshrrev_b64 v[2:3], 24, v[66:67]
; GFX11-NEXT: v_lshrrev_b64 v[3:4], 24, v[70:71]
; GFX11-NEXT: s_bfe_u32 s10, s11, 0x10010
-; GFX11-NEXT: v_lshrrev_b64 v[4:5], 24, v[19:20]
+; GFX11-NEXT: v_lshrrev_b64 v[4:5], 24, v[20:21]
; GFX11-NEXT: s_add_i32 s10, s10, s11
; GFX11-NEXT: s_bitset1_b32 s11, 22
; GFX11-NEXT: s_addk_i32 s10, 0x7fff
; GFX11-NEXT: s_and_b32 s14, vcc_lo, exec_lo
; GFX11-NEXT: s_cselect_b32 s10, s11, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s19, s60
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s1, s76
; GFX11-NEXT: s_lshr_b32 s26, s10, 16
-; GFX11-NEXT: s_pack_ll_b32_b16 s4, s18, s40
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s23, s62
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s3, s74
+; GFX11-NEXT: s_pack_ll_b32_b16 s9, s17, s75
+; GFX11-NEXT: s_pack_ll_b32_b16 s29, s19, s77
+; GFX11-NEXT: s_pack_ll_b32_b16 s28, s18, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s57, s23, s79
+; GFX11-NEXT: s_pack_ll_b32_b16 s63, s25, s88
+; GFX11-NEXT: s_pack_ll_b32_b16 s73, s27, s89
+; GFX11-NEXT: s_pack_ll_b32_b16 s72, s26, s13
+; GFX11-NEXT: s_pack_ll_b32_b16 s62, s24, s12
; GFX11-NEXT: v_lshrrev_b32_e32 v5, 24, v55
-; GFX11-NEXT: v_lshrrev_b32_e32 v12, 8, v55
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v54
+; GFX11-NEXT: v_lshrrev_b32_e32 v11, 8, v55
+; GFX11-NEXT: v_lshrrev_b32_e32 v15, 16, v54
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 8, v54
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 24, v67
; GFX11-NEXT: v_lshrrev_b32_e32 v64, 8, v67
@@ -168718,290 +169347,282 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v68, 8, v71
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v70
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 8, v70
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 24, v20
+; GFX11-NEXT: v_lshrrev_b32_e32 v71, 24, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v21, 8, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v20, 8, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v19, 8, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v18, 8, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v17, 8, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v16, 8, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v84, 16, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v15, 8, v15
-; GFX11-NEXT: v_lshrrev_b32_e32 v14, 8, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v13, 8, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v12, 8, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v7, 8, v7
; GFX11-NEXT: v_lshrrev_b32_e32 v6, 8, v6
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s21, s61
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s25, s63
-; GFX11-NEXT: s_pack_ll_b32_b16 s57, s27, s73
-; GFX11-NEXT: s_pack_ll_b32_b16 s56, s26, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s24, s12
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[8:9], 24
-; GFX11-NEXT: s_lshr_b64 s[12:13], s[4:5], 24
-; GFX11-NEXT: s_lshr_b64 s[14:15], s[46:47], 24
-; GFX11-NEXT: s_lshr_b64 s[40:41], s[44:45], 24
-; GFX11-NEXT: s_lshr_b64 s[42:43], s[28:29], 24
-; GFX11-NEXT: s_lshr_b64 vcc, s[56:57], 24
-; GFX11-NEXT: s_lshr_b64 s[34:35], s[10:11], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
-; GFX11-NEXT: s_lshr_b32 s13, s57, 24
-; GFX11-NEXT: s_lshr_b32 s15, s57, 8
-; GFX11-NEXT: s_lshr_b32 s41, s56, 16
-; GFX11-NEXT: s_lshr_b32 s43, s56, 8
-; GFX11-NEXT: s_lshr_b32 s56, s11, 24
-; GFX11-NEXT: s_lshr_b32 s11, s11, 8
-; GFX11-NEXT: s_lshr_b32 s57, s10, 16
-; GFX11-NEXT: s_lshr_b32 s10, s10, 8
-; GFX11-NEXT: s_lshr_b32 s74, s9, 24
+; GFX11-NEXT: s_lshr_b64 s[14:15], s[72:73], 24
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[62:63], 24
+; GFX11-NEXT: s_lshr_b64 s[10:11], s[56:57], 24
+; GFX11-NEXT: s_lshr_b64 s[12:13], s[44:45], 24
+; GFX11-NEXT: s_lshr_b64 s[40:41], s[28:29], 24
+; GFX11-NEXT: s_lshr_b64 s[46:47], s[8:9], 24
+; GFX11-NEXT: s_lshr_b64 s[58:59], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[60:61], s[4:5], 24
+; GFX11-NEXT: s_lshr_b32 s11, s73, 24
+; GFX11-NEXT: s_lshr_b32 s13, s73, 8
+; GFX11-NEXT: s_lshr_b32 s15, s72, 16
+; GFX11-NEXT: s_lshr_b32 s41, s72, 8
+; GFX11-NEXT: s_lshr_b32 s43, s63, 24
+; GFX11-NEXT: s_lshr_b32 s47, s63, 8
+; GFX11-NEXT: s_lshr_b32 s59, s62, 16
+; GFX11-NEXT: s_lshr_b32 s61, s62, 8
+; GFX11-NEXT: s_lshr_b32 s62, s57, 24
+; GFX11-NEXT: s_lshr_b32 s57, s57, 8
+; GFX11-NEXT: s_lshr_b32 s63, s56, 16
+; GFX11-NEXT: s_lshr_b32 s56, s56, 8
+; GFX11-NEXT: s_lshr_b32 s72, s45, 24
+; GFX11-NEXT: s_lshr_b32 s45, s45, 8
+; GFX11-NEXT: s_lshr_b32 s73, s44, 16
+; GFX11-NEXT: s_lshr_b32 s44, s44, 8
+; GFX11-NEXT: s_lshr_b32 s90, s29, 24
+; GFX11-NEXT: s_lshr_b32 s29, s29, 8
+; GFX11-NEXT: s_lshr_b32 s91, s28, 16
+; GFX11-NEXT: s_lshr_b32 s28, s28, 8
+; GFX11-NEXT: s_lshr_b32 s92, s9, 24
; GFX11-NEXT: s_lshr_b32 s9, s9, 8
-; GFX11-NEXT: s_lshr_b32 s75, s8, 16
+; GFX11-NEXT: s_lshr_b32 s93, s8, 16
; GFX11-NEXT: s_lshr_b32 s8, s8, 8
-; GFX11-NEXT: s_lshr_b32 s76, s7, 24
-; GFX11-NEXT: s_lshr_b32 s77, s7, 8
-; GFX11-NEXT: s_lshr_b32 s78, s6, 16
-; GFX11-NEXT: s_lshr_b32 s79, s6, 8
-; GFX11-NEXT: s_lshr_b32 s88, s5, 24
-; GFX11-NEXT: s_lshr_b32 s89, s5, 8
-; GFX11-NEXT: s_lshr_b32 s90, s4, 16
-; GFX11-NEXT: s_lshr_b32 s91, s4, 8
-; GFX11-NEXT: s_lshr_b32 s92, s47, 24
-; GFX11-NEXT: s_lshr_b32 s47, s47, 8
-; GFX11-NEXT: s_lshr_b32 s93, s46, 16
-; GFX11-NEXT: s_lshr_b32 s46, s46, 8
-; GFX11-NEXT: s_lshr_b32 s95, s45, 24
-; GFX11-NEXT: s_lshr_b32 s45, s45, 8
-; GFX11-NEXT: s_lshr_b32 s99, s44, 16
-; GFX11-NEXT: s_lshr_b32 s100, s44, 8
-; GFX11-NEXT: s_lshr_b32 s101, s29, 24
-; GFX11-NEXT: s_lshr_b32 s102, s29, 8
-; GFX11-NEXT: s_lshr_b32 s103, s28, 16
-; GFX11-NEXT: s_lshr_b32 s104, s28, 8
+; GFX11-NEXT: s_lshr_b32 s94, s7, 24
+; GFX11-NEXT: s_lshr_b32 s95, s7, 8
+; GFX11-NEXT: s_lshr_b32 vcc_lo, s6, 16
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s6, 8
+; GFX11-NEXT: s_lshr_b32 s7, s5, 24
+; GFX11-NEXT: s_lshr_b32 s30, s5, 8
+; GFX11-NEXT: s_lshr_b32 s5, s4, 16
+; GFX11-NEXT: s_lshr_b32 s4, s4, 8
; GFX11-NEXT: s_branch .LBB91_5
; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $sgpr103
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr102
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr100
-; GFX11-NEXT: ; implicit-def: $sgpr99
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr60
+; GFX11-NEXT: ; implicit-def: $sgpr58
+; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr36
; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr37
-; GFX11-NEXT: ; implicit-def: $sgpr35
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr34
+; GFX11-NEXT: ; implicit-def: $sgpr97
+; GFX11-NEXT: ; implicit-def: $vcc_hi
+; GFX11-NEXT: ; implicit-def: $sgpr51
; GFX11-NEXT: ; implicit-def: $sgpr52
+; GFX11-NEXT: ; implicit-def: $sgpr37
+; GFX11-NEXT: ; implicit-def: $sgpr98
+; GFX11-NEXT: ; implicit-def: $sgpr35
+; GFX11-NEXT: ; implicit-def: $sgpr55
+; GFX11-NEXT: ; implicit-def: $sgpr64
; GFX11-NEXT: ; implicit-def: $sgpr53
+; GFX11-NEXT: ; implicit-def: $sgpr99
; GFX11-NEXT: ; implicit-def: $sgpr50
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr64
+; GFX11-NEXT: ; implicit-def: $sgpr66
+; GFX11-NEXT: ; implicit-def: $sgpr67
; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr100
; GFX11-NEXT: ; implicit-def: $sgpr54
-; GFX11-NEXT: ; implicit-def: $sgpr96
-; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr67
+; GFX11-NEXT: ; implicit-def: $sgpr70
+; GFX11-NEXT: ; implicit-def: $sgpr71
; GFX11-NEXT: ; implicit-def: $sgpr68
+; GFX11-NEXT: ; implicit-def: $sgpr101
; GFX11-NEXT: ; implicit-def: $sgpr65
-; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr55
+; GFX11-NEXT: ; implicit-def: $sgpr81
+; GFX11-NEXT: ; implicit-def: $sgpr82
; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr71
+; GFX11-NEXT: ; implicit-def: $sgpr102
; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr66
-; GFX11-NEXT: ; implicit-def: $sgpr82
+; GFX11-NEXT: ; implicit-def: $sgpr85
+; GFX11-NEXT: ; implicit-def: $sgpr48
; GFX11-NEXT: ; implicit-def: $sgpr83
+; GFX11-NEXT: ; implicit-def: $sgpr103
; GFX11-NEXT: ; implicit-def: $sgpr80
-; GFX11-NEXT: ; implicit-def: $sgpr97
-; GFX11-NEXT: ; implicit-def: $sgpr70
-; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr84
-; GFX11-NEXT: ; implicit-def: $sgpr98
-; GFX11-NEXT: ; implicit-def: $sgpr81
-; GFX11-NEXT: ; implicit-def: $sgpr86
; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr85
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s4, 0
-; GFX11-NEXT: v_writelane_b32 v43, s5, 1
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s4, 2
-; GFX11-NEXT: v_writelane_b32 v43, s5, 3
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s74, 4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: v_writelane_b32 v43, s75, 5
-; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; kill: killed $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: v_writelane_b32 v43, s74, 6
-; GFX11-NEXT: v_writelane_b32 v43, s75, 7
+; GFX11-NEXT: ; implicit-def: $sgpr96
+; GFX11-NEXT: ; implicit-def: $sgpr86
+; GFX11-NEXT: ; implicit-def: $sgpr104
+; GFX11-NEXT: ; implicit-def: $sgpr84
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: s_branch .LBB91_2
+; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr78
+; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr90
+; GFX11-NEXT: ; implicit-def: $sgpr92
+; GFX11-NEXT: ; implicit-def: $sgpr94
+; GFX11-NEXT: ; implicit-def: $sgpr30
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; kill: killed $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; kill: killed $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
+; GFX11-NEXT: s_cbranch_vccz .LBB91_2
; GFX11-NEXT: .LBB91_4:
-; GFX11-NEXT: v_dual_mov_b32 v10, s94 :: v_dual_mov_b32 v11, s30
-; GFX11-NEXT: v_readlane_b32 s94, v43, 2
-; GFX11-NEXT: v_dual_mov_b32 v96, s37 :: v_dual_mov_b32 v87, s34
-; GFX11-NEXT: v_dual_mov_b32 v6, s49 :: v_dual_mov_b32 v7, s35
-; GFX11-NEXT: v_readlane_b32 s95, v43, 3
-; GFX11-NEXT: v_readlane_b32 vcc_lo, v43, 6
-; GFX11-NEXT: v_readlane_b32 s30, v43, 0
-; GFX11-NEXT: v_readlane_b32 s34, v43, 4
-; GFX11-NEXT: v_dual_mov_b32 v52, s44 :: v_dual_mov_b32 v51, s45
-; GFX11-NEXT: v_dual_mov_b32 v50, s10 :: v_dual_mov_b32 v49, s46
-; GFX11-NEXT: v_dual_mov_b32 v39, s47 :: v_dual_mov_b32 v48, s98
-; GFX11-NEXT: v_dual_mov_b32 v38, s56 :: v_dual_mov_b32 v37, s97
-; GFX11-NEXT: v_dual_mov_b32 v36, s57 :: v_dual_mov_b32 v35, s58
-; GFX11-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s9
-; GFX11-NEXT: v_dual_mov_b32 v32, s60 :: v_dual_mov_b32 v31, s61
-; GFX11-NEXT: v_dual_mov_b32 v30, s8 :: v_dual_mov_b32 v29, s62
-; GFX11-NEXT: v_dual_mov_b32 v27, s63 :: v_dual_mov_b32 v28, s96
-; GFX11-NEXT: v_dual_mov_b32 v26, s72 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s73 :: v_dual_mov_b32 v23, s28
-; GFX11-NEXT: v_dual_mov_b32 v21, s29 :: v_dual_mov_b32 v22, s6
-; GFX11-NEXT: v_dual_mov_b32 v53, s87 :: v_dual_mov_b32 v54, s86
-; GFX11-NEXT: v_dual_mov_b32 v5, s85 :: v_dual_mov_b32 v12, s5
-; GFX11-NEXT: v_dual_mov_b32 v65, s4 :: v_dual_mov_b32 v66, s48
-; GFX11-NEXT: v_dual_mov_b32 v55, s81 :: v_dual_mov_b32 v64, s84
-; GFX11-NEXT: v_dual_mov_b32 v69, s83 :: v_dual_mov_b32 v70, s82
-; GFX11-NEXT: v_dual_mov_b32 v67, s70 :: v_dual_mov_b32 v68, s80
-; GFX11-NEXT: v_dual_mov_b32 v80, s71 :: v_dual_mov_b32 v19, s39
-; GFX11-NEXT: v_dual_mov_b32 v71, s66 :: v_dual_mov_b32 v20, s69
-; GFX11-NEXT: v_dual_mov_b32 v82, s68 :: v_dual_mov_b32 v17, s67
-; GFX11-NEXT: v_dual_mov_b32 v81, s55 :: v_dual_mov_b32 v18, s65
-; GFX11-NEXT: v_dual_mov_b32 v84, s38 :: v_dual_mov_b32 v15, s64
-; GFX11-NEXT: v_dual_mov_b32 v83, s51 :: v_dual_mov_b32 v16, s54
-; GFX11-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v13, s52
-; GFX11-NEXT: v_dual_mov_b32 v85, s36 :: v_dual_mov_b32 v14, s50
-; GFX11-NEXT: v_dual_mov_b32 v1, s74 :: v_dual_mov_b32 v2, s76
-; GFX11-NEXT: v_dual_mov_b32 v3, s78 :: v_dual_mov_b32 v4, s88
-; GFX11-NEXT: v_dual_mov_b32 v8, s90 :: v_dual_mov_b32 v9, s92
-; GFX11-NEXT: s_mov_b32 s58, s11
-; GFX11-NEXT: v_readlane_b32 s59, v43, 8
-; GFX11-NEXT: v_readlane_b32 s72, v43, 9
-; GFX11-NEXT: v_readlane_b32 s60, v43, 10
-; GFX11-NEXT: v_readlane_b32 s61, v43, 11
-; GFX11-NEXT: v_readlane_b32 s62, v43, 12
-; GFX11-NEXT: v_readlane_b32 s63, v43, 13
-; GFX11-NEXT: v_readlane_b32 s73, v43, 14
-; GFX11-NEXT: v_readlane_b32 s13, v43, 15
-; GFX11-NEXT: v_readlane_b32 s15, v43, 16
-; GFX11-NEXT: v_readlane_b32 s41, v43, 17
-; GFX11-NEXT: v_readlane_b32 s43, v43, 18
-; GFX11-NEXT: v_readlane_b32 s56, v43, 19
-; GFX11-NEXT: v_readlane_b32 s11, v43, 20
-; GFX11-NEXT: v_readlane_b32 s57, v43, 21
-; GFX11-NEXT: v_readlane_b32 s10, v43, 22
-; GFX11-NEXT: v_readlane_b32 s74, v43, 23
-; GFX11-NEXT: v_readlane_b32 s9, v43, 24
-; GFX11-NEXT: v_readlane_b32 s75, v43, 25
-; GFX11-NEXT: v_readlane_b32 s8, v43, 26
-; GFX11-NEXT: v_readlane_b32 s76, v43, 27
-; GFX11-NEXT: v_readlane_b32 s77, v43, 28
-; GFX11-NEXT: v_readlane_b32 s78, v43, 29
-; GFX11-NEXT: v_readlane_b32 s79, v43, 30
-; GFX11-NEXT: v_readlane_b32 s88, v43, 31
-; GFX11-NEXT: v_readlane_b32 s89, v42, 0
-; GFX11-NEXT: v_readlane_b32 s90, v42, 1
-; GFX11-NEXT: v_readlane_b32 s91, v42, 2
-; GFX11-NEXT: v_readlane_b32 s92, v42, 3
-; GFX11-NEXT: v_readlane_b32 s47, v42, 4
-; GFX11-NEXT: v_readlane_b32 s93, v42, 5
-; GFX11-NEXT: v_readlane_b32 vcc_hi, v43, 7
-; GFX11-NEXT: v_readlane_b32 s46, v42, 6
-; GFX11-NEXT: v_readlane_b32 s31, v43, 1
-; GFX11-NEXT: v_readlane_b32 s95, v42, 7
-; GFX11-NEXT: v_readlane_b32 s45, v42, 8
-; GFX11-NEXT: v_readlane_b32 s35, v43, 5
+; GFX11-NEXT: v_dual_mov_b32 v53, s4 :: v_dual_mov_b32 v52, s5
+; GFX11-NEXT: v_dual_mov_b32 v51, s104 :: v_dual_mov_b32 v50, s6
+; GFX11-NEXT: v_dual_mov_b32 v48, s7 :: v_dual_mov_b32 v49, s103
+; GFX11-NEXT: v_dual_mov_b32 v39, s8 :: v_dual_mov_b32 v38, s102
+; GFX11-NEXT: v_dual_mov_b32 v37, s9 :: v_dual_mov_b32 v36, s44
+; GFX11-NEXT: v_dual_mov_b32 v35, s45 :: v_dual_mov_b32 v34, s101
+; GFX11-NEXT: v_dual_mov_b32 v33, s56 :: v_dual_mov_b32 v32, s57
+; GFX11-NEXT: v_dual_mov_b32 v31, s100 :: v_dual_mov_b32 v30, s62
+; GFX11-NEXT: v_dual_mov_b32 v28, s63 :: v_dual_mov_b32 v29, s99
+; GFX11-NEXT: v_dual_mov_b32 v27, s72 :: v_dual_mov_b32 v26, s98
+; GFX11-NEXT: v_dual_mov_b32 v25, s73 :: v_dual_mov_b32 v24, s28
+; GFX11-NEXT: v_dual_mov_b32 v22, s29 :: v_dual_mov_b32 v23, s97
+; GFX11-NEXT: v_dual_mov_b32 v15, s96 :: v_dual_mov_b32 v54, s87
+; GFX11-NEXT: v_dual_mov_b32 v5, s84 :: v_dual_mov_b32 v66, s85
+; GFX11-NEXT: v_dual_mov_b32 v11, s86 :: v_dual_mov_b32 v64, s83
+; GFX11-NEXT: v_dual_mov_b32 v65, s48 :: v_dual_mov_b32 v70, s81
+; GFX11-NEXT: v_dual_mov_b32 v55, s80 :: v_dual_mov_b32 v68, s39
+; GFX11-NEXT: v_dual_mov_b32 v69, s82 :: v_dual_mov_b32 v80, s71
+; GFX11-NEXT: v_dual_mov_b32 v67, s69 :: v_dual_mov_b32 v20, s70
+; GFX11-NEXT: v_dual_mov_b32 v71, s65 :: v_dual_mov_b32 v82, s67
+; GFX11-NEXT: v_dual_mov_b32 v21, s68 :: v_dual_mov_b32 v18, s66
+; GFX11-NEXT: v_dual_mov_b32 v81, s54 :: v_dual_mov_b32 v84, s64
+; GFX11-NEXT: v_dual_mov_b32 v19, s38 :: v_dual_mov_b32 v16, s55
+; GFX11-NEXT: v_dual_mov_b32 v83, s50 :: v_dual_mov_b32 v86, s52
+; GFX11-NEXT: v_dual_mov_b32 v17, s53 :: v_dual_mov_b32 v12, s51
+; GFX11-NEXT: v_dual_mov_b32 v85, s35 :: v_dual_mov_b32 v96, s49
+; GFX11-NEXT: v_dual_mov_b32 v13, s37 :: v_dual_mov_b32 v6, s36
+; GFX11-NEXT: v_dual_mov_b32 v87, vcc_hi :: v_dual_mov_b32 v2, s94
+; GFX11-NEXT: v_dual_mov_b32 v7, s34 :: v_dual_mov_b32 v4, s90
+; GFX11-NEXT: v_dual_mov_b32 v1, s30 :: v_dual_mov_b32 v8, s88
+; GFX11-NEXT: v_dual_mov_b32 v3, s92 :: v_dual_mov_b32 v10, s76
+; GFX11-NEXT: v_dual_mov_b32 v9, s78 :: v_dual_mov_b32 v14, s74
+; GFX11-NEXT: v_readlane_b32 s76, v42, 0
+; GFX11-NEXT: v_readlane_b32 s74, v42, 1
+; GFX11-NEXT: v_readlane_b32 s75, v42, 2
+; GFX11-NEXT: v_readlane_b32 s77, v42, 3
+; GFX11-NEXT: v_readlane_b32 s78, v42, 4
+; GFX11-NEXT: v_readlane_b32 s79, v42, 5
+; GFX11-NEXT: v_readlane_b32 s88, v42, 6
+; GFX11-NEXT: v_readlane_b32 s89, v42, 7
+; GFX11-NEXT: v_readlane_b32 s11, v42, 8
+; GFX11-NEXT: v_readlane_b32 s13, v42, 9
+; GFX11-NEXT: v_readlane_b32 s15, v42, 10
+; GFX11-NEXT: v_readlane_b32 s41, v42, 11
+; GFX11-NEXT: v_readlane_b32 s43, v42, 12
+; GFX11-NEXT: v_readlane_b32 s47, v42, 13
+; GFX11-NEXT: v_readlane_b32 s59, v42, 14
+; GFX11-NEXT: v_readlane_b32 s61, v42, 15
+; GFX11-NEXT: v_readlane_b32 s62, v42, 16
+; GFX11-NEXT: v_readlane_b32 s57, v42, 17
+; GFX11-NEXT: v_readlane_b32 s63, v42, 18
+; GFX11-NEXT: v_readlane_b32 s56, v42, 19
+; GFX11-NEXT: v_readlane_b32 s72, v42, 20
+; GFX11-NEXT: v_readlane_b32 s45, v42, 21
+; GFX11-NEXT: v_readlane_b32 s73, v42, 22
+; GFX11-NEXT: v_readlane_b32 s44, v42, 23
+; GFX11-NEXT: v_readlane_b32 s90, v42, 24
+; GFX11-NEXT: v_readlane_b32 s29, v42, 25
+; GFX11-NEXT: v_readlane_b32 s91, v42, 26
+; GFX11-NEXT: v_readlane_b32 s28, v42, 27
+; GFX11-NEXT: v_readlane_b32 s92, v42, 28
+; GFX11-NEXT: v_readlane_b32 s9, v42, 29
+; GFX11-NEXT: v_readlane_b32 s93, v42, 30
+; GFX11-NEXT: v_readlane_b32 s8, v42, 31
+; GFX11-NEXT: v_readlane_b32 s94, v43, 0
+; GFX11-NEXT: v_readlane_b32 s95, v43, 1
+; GFX11-NEXT: v_readlane_b32 vcc_lo, v43, 2
+; GFX11-NEXT: v_readlane_b32 vcc_hi, v43, 3
+; GFX11-NEXT: v_readlane_b32 s7, v43, 4
+; GFX11-NEXT: v_readlane_b32 s30, v43, 5
+; GFX11-NEXT: v_readlane_b32 s5, v43, 6
+; GFX11-NEXT: v_readlane_b32 s4, v43, 7
; GFX11-NEXT: .LBB91_5: ; %end
; GFX11-NEXT: s_and_b32 s0, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s104, 8
-; GFX11-NEXT: s_and_b32 s5, s103, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s42, 8
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: s_lshl_b32 s4, s4, 8
+; GFX11-NEXT: s_and_b32 s5, s5, 0xff
+; GFX11-NEXT: s_lshl_b32 s6, s60, 8
; GFX11-NEXT: s_or_b32 s0, s0, s4
; GFX11-NEXT: s_or_b32 s4, s5, s6
; GFX11-NEXT: s_and_b32 s1, s1, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s102, 8
-; GFX11-NEXT: s_and_b32 s6, s58, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s101, 8
+; GFX11-NEXT: s_lshl_b32 s5, s30, 8
+; GFX11-NEXT: s_and_b32 s6, s76, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 8
; GFX11-NEXT: s_or_b32 s1, s1, s5
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
@@ -169011,15 +169632,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_or_b32 s0, s0, s4
; GFX11-NEXT: s_or_b32 s1, s1, s5
; GFX11-NEXT: s_and_b32 s2, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s100, 8
-; GFX11-NEXT: s_and_b32 s5, s99, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s40, 8
+; GFX11-NEXT: s_lshl_b32 s4, vcc_hi, 8
+; GFX11-NEXT: s_and_b32 s5, vcc_lo, 0xff
+; GFX11-NEXT: s_lshl_b32 s6, s58, 8
; GFX11-NEXT: s_or_b32 s2, s2, s4
; GFX11-NEXT: s_or_b32 s4, s5, s6
; GFX11-NEXT: s_and_b32 s3, s3, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s45, 8
-; GFX11-NEXT: s_and_b32 s6, s59, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s95, 8
+; GFX11-NEXT: s_lshl_b32 s5, s95, 8
+; GFX11-NEXT: s_and_b32 s6, s74, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s94, 8
; GFX11-NEXT: s_or_b32 s3, s3, s5
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
@@ -169031,14 +169652,14 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
; GFX11-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
; GFX11-NEXT: s_and_b32 s0, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s46, 8
+; GFX11-NEXT: s_lshl_b32 s1, s8, 8
; GFX11-NEXT: s_and_b32 s2, s93, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s14, 8
+; GFX11-NEXT: s_lshl_b32 s3, s46, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s17, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s47, 8
-; GFX11-NEXT: s_and_b32 s4, s72, 0xff
+; GFX11-NEXT: s_lshl_b32 s3, s9, 8
+; GFX11-NEXT: s_and_b32 s4, s75, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s92, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s5
@@ -169049,15 +169670,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s91, 8
-; GFX11-NEXT: s_and_b32 s4, s90, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s12, 8
+; GFX11-NEXT: s_lshl_b32 s3, s28, 8
+; GFX11-NEXT: s_and_b32 s4, s91, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s40, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s5
; GFX11-NEXT: s_and_b32 s4, s19, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s89, 8
-; GFX11-NEXT: s_and_b32 s6, s60, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s88, 8
+; GFX11-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-NEXT: s_and_b32 s6, s77, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s90, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
@@ -169069,15 +169690,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_mov_b32 v113, s1
; GFX11-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_mov_b32 v115, s3
; GFX11-NEXT: s_and_b32 s0, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s79, 8
-; GFX11-NEXT: s_and_b32 s2, s78, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s30, 8
+; GFX11-NEXT: s_lshl_b32 s1, s44, 8
+; GFX11-NEXT: s_and_b32 s2, s73, 0xff
+; GFX11-NEXT: s_lshl_b32 s3, s12, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s21, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s77, 8
-; GFX11-NEXT: s_and_b32 s4, s61, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s76, 8
+; GFX11-NEXT: s_lshl_b32 s3, s45, 8
+; GFX11-NEXT: s_and_b32 s4, s78, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s72, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s5
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
@@ -169087,15 +169708,15 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s3
; GFX11-NEXT: s_and_b32 s2, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s3, s8, 8
-; GFX11-NEXT: s_and_b32 s4, s75, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s94, 8
+; GFX11-NEXT: s_lshl_b32 s3, s56, 8
+; GFX11-NEXT: s_and_b32 s4, s63, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s10, 8
; GFX11-NEXT: s_or_b32 s2, s2, s3
; GFX11-NEXT: s_or_b32 s3, s4, s5
; GFX11-NEXT: s_and_b32 s4, s23, 0xff
-; GFX11-NEXT: s_lshl_b32 s5, s9, 8
-; GFX11-NEXT: s_and_b32 s6, s62, 0xff
-; GFX11-NEXT: s_lshl_b32 s7, s74, 8
+; GFX11-NEXT: s_lshl_b32 s5, s57, 8
+; GFX11-NEXT: s_and_b32 s6, s79, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s62, 8
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
@@ -169110,193 +169731,194 @@ define inreg <128 x i8> @bitcast_v64bf16_to_v128i8_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v97, s0 :: v_dual_mov_b32 v98, s1
; GFX11-NEXT: v_dual_mov_b32 v99, s2 :: v_dual_mov_b32 v100, s3
; GFX11-NEXT: s_and_b32 s0, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s1, s10, 8
-; GFX11-NEXT: s_and_b32 s2, s57, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s34, 8
+; GFX11-NEXT: s_lshl_b32 s1, s61, 8
+; GFX11-NEXT: s_and_b32 s2, s59, 0xff
+; GFX11-NEXT: s_lshl_b32 s4, s42, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_or_b32 s1, s2, s4
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
-; GFX11-NEXT: s_lshl_b32 s2, s11, 8
+; GFX11-NEXT: s_lshl_b32 s2, s47, 8
; GFX11-NEXT: s_or_b32 s0, s0, s1
; GFX11-NEXT: s_and_b32 s1, s25, 0xff
-; GFX11-NEXT: s_and_b32 s3, s63, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s56, 8
+; GFX11-NEXT: s_and_b32 s3, s88, 0xff
+; GFX11-NEXT: s_lshl_b32 s4, s43, 8
; GFX11-NEXT: s_or_b32 s1, s1, s2
; GFX11-NEXT: s_or_b32 s2, s3, s4
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
; GFX11-NEXT: s_and_b32 s3, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, s43, 8
+; GFX11-NEXT: s_lshl_b32 s4, s41, 8
; GFX11-NEXT: s_or_b32 s1, s1, s2
; GFX11-NEXT: s_or_b32 s2, s3, s4
-; GFX11-NEXT: s_and_b32 s3, s41, 0xff
-; GFX11-NEXT: s_lshl_b32 s4, vcc_lo, 8
-; GFX11-NEXT: s_lshl_b32 s5, s15, 8
+; GFX11-NEXT: s_and_b32 s3, s15, 0xff
+; GFX11-NEXT: s_lshl_b32 s4, s14, 8
+; GFX11-NEXT: s_lshl_b32 s5, s13, 8
; GFX11-NEXT: s_or_b32 s3, s3, s4
; GFX11-NEXT: s_and_b32 s4, s27, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s13, 8
+; GFX11-NEXT: s_lshl_b32 s6, s11, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_and_b32 s5, s73, 0xff
+; GFX11-NEXT: s_and_b32 s5, s89, 0xff
; GFX11-NEXT: s_and_b32 s2, s2, 0xffff
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_lshl_b32 s3, s3, 16
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_and_b32 v23, 0xff, v23
-; GFX11-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_lshlrev_b32 v6, 8, v6
+; GFX11-NEXT: v_dual_mov_b32 v113, s1 :: v_dual_and_b32 v24, 0xff, v24
+; GFX11-NEXT: v_and_b32_e32 v96, 0xff, v96
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; GFX11-NEXT: s_or_b32 s2, s2, s3
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_dual_mov_b32 v114, s2 :: v_dual_lshlrev_b32 v11, 8, v11
; GFX11-NEXT: s_or_b32 s3, s4, s5
-; GFX11-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_and_b32 v96, 0xff, v96
-; GFX11-NEXT: v_or_b32_e32 v6, v23, v6
-; GFX11-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
-; GFX11-NEXT: v_or_b32_e32 v11, v96, v11
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_dual_mov_b32 v115, s3 :: v_dual_lshlrev_b32 v6, 8, v6
+; GFX11-NEXT: v_or_b32_e32 v14, v96, v14
+; GFX11-NEXT: v_dual_mov_b32 v112, s0 :: v_dual_lshlrev_b32 v7, 8, v7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_or_b32_e32 v6, v24, v6
+; GFX11-NEXT: v_mov_b32_e32 v114, s2
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; GFX11-NEXT: v_lshlrev_b32_e32 v12, 8, v12
; GFX11-NEXT: v_lshlrev_b32_e32 v10, 8, v10
; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v24
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v25
+; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v8, 8, v8
-; GFX11-NEXT: v_or_b32_e32 v23, v6, v11
-; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v21
-; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v22
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v87
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v26
-; GFX11-NEXT: v_and_b32_e32 v26, 0xff, v86
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
+; GFX11-NEXT: v_or_b32_e32 v24, v6, v14
+; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v22
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v23
+; GFX11-NEXT: v_lshlrev_b32_e32 v22, 8, v87
+; GFX11-NEXT: v_and_b32_e32 v23, 0xff, v27
+; GFX11-NEXT: v_and_b32_e32 v27, 0xff, v86
; GFX11-NEXT: v_or_b32_e32 v6, v6, v7
-; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
-; GFX11-NEXT: v_or_b32_e32 v7, v11, v21
-; GFX11-NEXT: v_or_b32_e32 v11, v22, v13
-; GFX11-NEXT: v_or_b32_e32 v10, v26, v10
-; GFX11-NEXT: v_or_b32_e32 v13, v24, v14
-; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v25
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v85
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v29
-; GFX11-NEXT: v_and_b32_e32 v24, 0xff, v84
-; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v27
+; GFX11-NEXT: v_or_b32_e32 v13, v25, v13
+; GFX11-NEXT: v_or_b32_e32 v7, v14, v22
+; GFX11-NEXT: v_or_b32_e32 v12, v23, v12
+; GFX11-NEXT: v_or_b32_e32 v10, v27, v10
+; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v26
+; GFX11-NEXT: v_lshlrev_b32_e32 v22, 8, v85
+; GFX11-NEXT: v_and_b32_e32 v23, 0xff, v30
+; GFX11-NEXT: v_and_b32_e32 v25, 0xff, v84
; GFX11-NEXT: v_and_b32_e32 v26, 0xff, v28
-; GFX11-NEXT: v_lshlrev_b32_e32 v27, 8, v83
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v21
-; GFX11-NEXT: v_or_b32_e32 v15, v22, v15
-; GFX11-NEXT: v_or_b32_e32 v9, v24, v9
-; GFX11-NEXT: v_or_b32_e32 v16, v25, v16
-; GFX11-NEXT: v_or_b32_e32 v21, v26, v27
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
+; GFX11-NEXT: v_and_b32_e32 v27, 0xff, v29
+; GFX11-NEXT: v_lshlrev_b32_e32 v28, 8, v83
+; GFX11-NEXT: v_or_b32_e32 v14, v14, v22
+; GFX11-NEXT: v_or_b32_e32 v16, v23, v16
+; GFX11-NEXT: v_or_b32_e32 v9, v25, v9
+; GFX11-NEXT: v_or_b32_e32 v17, v26, v17
+; GFX11-NEXT: v_or_b32_e32 v22, v27, v28
; GFX11-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX11-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX11-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; GFX11-NEXT: v_or_b32_e32 v24, v6, v7
-; GFX11-NEXT: v_or_b32_e32 v25, v11, v10
-; GFX11-NEXT: v_or_b32_e32 v26, v13, v14
-; GFX11-NEXT: v_or_b32_e32 v6, v15, v9
-; GFX11-NEXT: v_or_b32_e32 v7, v16, v21
-; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v32
-; GFX11-NEXT: v_lshlrev_b32_e32 v10, 8, v17
-; GFX11-NEXT: v_and_b32_e32 v11, 0xff, v82
-; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v31
-; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v30
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v81
-; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v35
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v19
+; GFX11-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; GFX11-NEXT: v_or_b32_e32 v25, v6, v7
+; GFX11-NEXT: v_or_b32_e32 v26, v12, v10
+; GFX11-NEXT: v_or_b32_e32 v27, v13, v14
+; GFX11-NEXT: v_or_b32_e32 v6, v16, v9
+; GFX11-NEXT: v_or_b32_e32 v7, v17, v22
+; GFX11-NEXT: v_and_b32_e32 v9, 0xff, v33
+; GFX11-NEXT: v_lshlrev_b32_e32 v10, 8, v18
+; GFX11-NEXT: v_and_b32_e32 v12, 0xff, v82
+; GFX11-NEXT: v_lshlrev_b32_e32 v8, 8, v8
+; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v32
+; GFX11-NEXT: v_lshlrev_b32_e32 v14, 8, v19
+; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v31
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v81
+; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v36
+; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v20
; GFX11-NEXT: v_or_b32_e32 v9, v9, v10
-; GFX11-NEXT: v_or_b32_e32 v8, v11, v8
+; GFX11-NEXT: v_or_b32_e32 v8, v12, v8
; GFX11-NEXT: v_or_b32_e32 v10, v13, v14
-; GFX11-NEXT: v_or_b32_e32 v11, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v13, v17, v18
+; GFX11-NEXT: v_or_b32_e32 v12, v16, v17
+; GFX11-NEXT: v_or_b32_e32 v13, v18, v19
; GFX11-NEXT: v_and_b32_e32 v14, 0xff, v80
-; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v34
-; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v20
-; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v33
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v71
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v38
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v70
-; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v69
+; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v35
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v21
+; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v34
+; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v71
+; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v39
+; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v70
+; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v69
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 8, v3
; GFX11-NEXT: v_or_b32_e32 v4, v14, v4
-; GFX11-NEXT: v_or_b32_e32 v14, v15, v16
-; GFX11-NEXT: v_or_b32_e32 v15, v17, v18
-; GFX11-NEXT: v_or_b32_e32 v16, v19, v20
-; GFX11-NEXT: v_or_b32_e32 v3, v21, v3
+; GFX11-NEXT: v_or_b32_e32 v14, v16, v17
+; GFX11-NEXT: v_or_b32_e32 v16, v18, v19
+; GFX11-NEXT: v_or_b32_e32 v17, v20, v21
+; GFX11-NEXT: v_or_b32_e32 v3, v22, v3
; GFX11-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 16, v11
+; GFX11-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_or_b32_e32 v8, v9, v8
-; GFX11-NEXT: v_or_b32_e32 v9, v10, v11
-; GFX11-NEXT: v_or_b32_e32 v13, v13, v4
-; GFX11-NEXT: v_or_b32_e32 v14, v14, v15
-; GFX11-NEXT: v_or_b32_e32 v15, v16, v3
-; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v36
+; GFX11-NEXT: v_or_b32_e32 v9, v10, v12
+; GFX11-NEXT: v_or_b32_e32 v12, v13, v4
+; GFX11-NEXT: v_or_b32_e32 v13, v14, v16
+; GFX11-NEXT: v_or_b32_e32 v14, v17, v3
+; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v37
; GFX11-NEXT: v_lshlrev_b32_e32 v4, 8, v68
-; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v37
-; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v67
-; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v49
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v66
-; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v65
+; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v38
+; GFX11-NEXT: v_lshlrev_b32_e32 v16, 8, v67
+; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v50
+; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v66
+; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v65
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 8, v2
-; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v39
-; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v64
+; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v48
+; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v64
; GFX11-NEXT: v_or_b32_e32 v3, v3, v4
-; GFX11-NEXT: v_or_b32_e32 v4, v10, v11
-; GFX11-NEXT: v_or_b32_e32 v10, v16, v17
-; GFX11-NEXT: v_or_b32_e32 v2, v18, v2
-; GFX11-NEXT: v_or_b32_e32 v11, v19, v20
-; GFX11-NEXT: v_and_b32_e32 v16, 0xff, v48
-; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v55
-; GFX11-NEXT: v_and_b32_e32 v18, 0xff, v52
-; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v54
-; GFX11-NEXT: v_and_b32_e32 v20, 0xff, v53
+; GFX11-NEXT: v_or_b32_e32 v4, v10, v16
+; GFX11-NEXT: v_or_b32_e32 v10, v17, v18
+; GFX11-NEXT: v_or_b32_e32 v2, v19, v2
+; GFX11-NEXT: v_or_b32_e32 v16, v20, v21
+; GFX11-NEXT: v_and_b32_e32 v17, 0xff, v49
+; GFX11-NEXT: v_lshlrev_b32_e32 v18, 8, v55
+; GFX11-NEXT: v_and_b32_e32 v19, 0xff, v53
+; GFX11-NEXT: v_lshlrev_b32_e32 v20, 8, v54
+; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v51
-; GFX11-NEXT: v_lshlrev_b32_e32 v12, 8, v12
-; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v50
+; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v52
+; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
+; GFX11-NEXT: v_and_b32_e32 v22, 0xff, v51
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 8, v5
-; GFX11-NEXT: v_or_b32_e32 v16, v16, v17
-; GFX11-NEXT: v_or_b32_e32 v17, v18, v19
-; GFX11-NEXT: v_or_b32_e32 v1, v20, v1
-; GFX11-NEXT: v_or_b32_e32 v12, v21, v12
+; GFX11-NEXT: v_or_b32_e32 v17, v17, v18
+; GFX11-NEXT: v_or_b32_e32 v18, v19, v20
+; GFX11-NEXT: v_or_b32_e32 v1, v15, v1
+; GFX11-NEXT: v_or_b32_e32 v11, v21, v11
; GFX11-NEXT: v_or_b32_e32 v5, v22, v5
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshlrev_b32_e32 v18, 16, v16
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 16, v1
-; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; GFX11-NEXT: v_or_b32_e32 v16, v3, v4
+; GFX11-NEXT: v_or_b32_e32 v15, v3, v4
; GFX11-NEXT: v_or_b32_e32 v1, v10, v2
-; GFX11-NEXT: v_or_b32_e32 v2, v11, v18
-; GFX11-NEXT: v_or_b32_e32 v3, v17, v19
-; GFX11-NEXT: v_or_b32_e32 v4, v12, v5
+; GFX11-NEXT: v_or_b32_e32 v2, v16, v17
+; GFX11-NEXT: v_or_b32_e32 v3, v18, v19
+; GFX11-NEXT: v_or_b32_e32 v4, v11, v5
; GFX11-NEXT: s_clause 0x5
; GFX11-NEXT: scratch_store_b128 v0, v[97:100], off offset:32
; GFX11-NEXT: scratch_store_b128 v0, v[112:115], off offset:48
-; GFX11-NEXT: scratch_store_b128 v0, v[23:26], off offset:64
+; GFX11-NEXT: scratch_store_b128 v0, v[24:27], off offset:64
; GFX11-NEXT: scratch_store_b128 v0, v[6:9], off offset:80
-; GFX11-NEXT: scratch_store_b128 v0, v[13:16], off offset:96
+; GFX11-NEXT: scratch_store_b128 v0, v[12:15], off offset:96
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:112
; GFX11-NEXT: v_readlane_b32 s104, v41, 8
; GFX11-NEXT: v_readlane_b32 s103, v41, 7
@@ -175874,6 +176496,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: v_writelane_b32 v61, s29, 0
; SI-NEXT: v_writelane_b32 v61, s28, 1
; SI-NEXT: v_writelane_b32 v61, s27, 2
+; SI-NEXT: v_writelane_b32 v61, s26, 3
; SI-NEXT: s_mov_b32 s61, s21
; SI-NEXT: v_writelane_b32 v63, s30, 0
; SI-NEXT: v_writelane_b32 v63, s31, 1
@@ -175909,38 +176532,34 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: v_writelane_b32 v63, s87, 31
; SI-NEXT: v_writelane_b32 v63, s96, 32
; SI-NEXT: v_writelane_b32 v63, s97, 33
-; SI-NEXT: s_mov_b32 s67, s19
-; SI-NEXT: s_mov_b32 s54, s17
-; SI-NEXT: s_mov_b32 s35, s23
-; SI-NEXT: s_mov_b32 s39, s26
; SI-NEXT: s_mov_b32 s62, s25
-; SI-NEXT: v_writelane_b32 v63, s98, 34
-; SI-NEXT: v_writelane_b32 v63, s99, 35
-; SI-NEXT: v_readfirstlane_b32 s99, v1
-; SI-NEXT: v_readfirstlane_b32 s74, v24
-; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
+; SI-NEXT: s_mov_b32 s54, s18
+; SI-NEXT: s_mov_b32 s65, s17
+; SI-NEXT: s_mov_b32 s52, s19
+; SI-NEXT: s_mov_b32 s30, s23
+; SI-NEXT: v_readfirstlane_b32 s55, v1
; SI-NEXT: v_readfirstlane_b32 s6, v23
+; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
+; SI-NEXT: v_readfirstlane_b32 s11, v26
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v62, s74, 0
-; SI-NEXT: v_readfirstlane_b32 s12, v26
-; SI-NEXT: v_writelane_b32 v62, s6, 1
-; SI-NEXT: v_readfirstlane_b32 s14, v25
+; SI-NEXT: v_writelane_b32 v62, s6, 0
+; SI-NEXT: v_readfirstlane_b32 s12, v25
+; SI-NEXT: v_writelane_b32 v62, s11, 1
+; SI-NEXT: v_readfirstlane_b32 s41, v28
; SI-NEXT: v_writelane_b32 v62, s12, 2
-; SI-NEXT: v_readfirstlane_b32 s46, v28
-; SI-NEXT: v_writelane_b32 v62, s14, 3
-; SI-NEXT: v_readfirstlane_b32 s56, v27
-; SI-NEXT: v_writelane_b32 v62, s46, 4
-; SI-NEXT: v_readfirstlane_b32 s57, v30
+; SI-NEXT: v_readfirstlane_b32 s47, v27
+; SI-NEXT: v_writelane_b32 v62, s41, 3
+; SI-NEXT: v_readfirstlane_b32 s56, v30
+; SI-NEXT: v_writelane_b32 v62, s47, 4
+; SI-NEXT: v_readfirstlane_b32 s58, v29
; SI-NEXT: v_writelane_b32 v62, s56, 5
-; SI-NEXT: v_readfirstlane_b32 s59, v29
-; SI-NEXT: v_writelane_b32 v62, s57, 6
-; SI-NEXT: v_writelane_b32 v62, s59, 7
+; SI-NEXT: v_writelane_b32 v62, s58, 6
; SI-NEXT: s_mov_b32 s60, s20
; SI-NEXT: s_mov_b32 s63, s24
; SI-NEXT: v_readfirstlane_b32 s95, v3
; SI-NEXT: v_readfirstlane_b32 s31, v5
; SI-NEXT: v_readfirstlane_b32 s24, v9
-; SI-NEXT: v_readfirstlane_b32 s38, v12
+; SI-NEXT: v_readfirstlane_b32 s49, v12
; SI-NEXT: v_readfirstlane_b32 s36, v11
; SI-NEXT: v_readfirstlane_b32 s8, v14
; SI-NEXT: v_readfirstlane_b32 s27, v13
@@ -175951,17 +176570,20 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s42, v20
; SI-NEXT: v_readfirstlane_b32 s43, v19
; SI-NEXT: v_readfirstlane_b32 s44, v22
+; SI-NEXT: v_readfirstlane_b32 s45, v21
+; SI-NEXT: v_writelane_b32 v63, s98, 34
+; SI-NEXT: v_readfirstlane_b32 s74, v24
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:328
-; SI-NEXT: v_writelane_b32 v61, s4, 3
-; SI-NEXT: v_readfirstlane_b32 s45, v21
+; SI-NEXT: v_writelane_b32 v61, s4, 4
+; SI-NEXT: v_writelane_b32 v63, s99, 35
; SI-NEXT: v_readfirstlane_b32 s98, v10
; SI-NEXT: v_readfirstlane_b32 s90, v8
; SI-NEXT: v_readfirstlane_b32 s88, v7
; SI-NEXT: v_readfirstlane_b32 s91, v6
; SI-NEXT: v_readfirstlane_b32 s93, v4
-; SI-NEXT: v_readfirstlane_b32 s55, v2
+; SI-NEXT: v_readfirstlane_b32 s99, v2
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
@@ -175979,142 +176601,171 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:324
-; SI-NEXT: v_writelane_b32 v61, s4, 4
+; SI-NEXT: v_writelane_b32 v61, s4, 5
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:320
-; SI-NEXT: v_writelane_b32 v61, s4, 5
+; SI-NEXT: v_writelane_b32 v61, s4, 6
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316
-; SI-NEXT: v_writelane_b32 v61, s4, 6
+; SI-NEXT: v_writelane_b32 v61, s4, 7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:312
-; SI-NEXT: v_writelane_b32 v61, s4, 7
+; SI-NEXT: v_writelane_b32 v61, s4, 8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:308
-; SI-NEXT: v_writelane_b32 v61, s4, 8
+; SI-NEXT: v_writelane_b32 v61, s4, 9
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:304
-; SI-NEXT: v_writelane_b32 v61, s4, 9
+; SI-NEXT: v_writelane_b32 v61, s4, 10
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:300
-; SI-NEXT: v_writelane_b32 v61, s4, 10
+; SI-NEXT: v_writelane_b32 v61, s4, 11
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:296
-; SI-NEXT: v_writelane_b32 v61, s4, 11
+; SI-NEXT: v_writelane_b32 v61, s4, 12
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:292
-; SI-NEXT: v_writelane_b32 v61, s4, 12
+; SI-NEXT: v_writelane_b32 v61, s4, 13
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:288
-; SI-NEXT: v_writelane_b32 v61, s4, 13
+; SI-NEXT: v_writelane_b32 v61, s4, 14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:284
-; SI-NEXT: v_writelane_b32 v61, s4, 14
+; SI-NEXT: v_writelane_b32 v61, s4, 15
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:280
-; SI-NEXT: v_writelane_b32 v61, s4, 15
+; SI-NEXT: v_writelane_b32 v61, s4, 16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:276
-; SI-NEXT: v_writelane_b32 v61, s4, 16
+; SI-NEXT: v_writelane_b32 v61, s4, 17
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:272
-; SI-NEXT: v_writelane_b32 v61, s4, 17
+; SI-NEXT: v_writelane_b32 v61, s4, 18
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:268
-; SI-NEXT: v_writelane_b32 v61, s4, 18
+; SI-NEXT: v_writelane_b32 v61, s4, 19
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:264
-; SI-NEXT: v_writelane_b32 v61, s4, 19
+; SI-NEXT: v_writelane_b32 v61, s4, 20
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:260
-; SI-NEXT: v_writelane_b32 v61, s4, 20
+; SI-NEXT: v_writelane_b32 v61, s4, 21
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:256
-; SI-NEXT: v_writelane_b32 v61, s4, 21
+; SI-NEXT: v_writelane_b32 v61, s4, 22
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:252
-; SI-NEXT: v_writelane_b32 v61, s4, 22
+; SI-NEXT: v_writelane_b32 v61, s4, 23
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:248
-; SI-NEXT: v_writelane_b32 v61, s4, 23
+; SI-NEXT: v_writelane_b32 v61, s4, 24
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:244
-; SI-NEXT: v_writelane_b32 v61, s4, 24
+; SI-NEXT: v_writelane_b32 v61, s4, 25
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:240
-; SI-NEXT: v_writelane_b32 v61, s4, 25
+; SI-NEXT: v_writelane_b32 v61, s4, 26
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:236
-; SI-NEXT: v_writelane_b32 v61, s4, 26
+; SI-NEXT: v_writelane_b32 v61, s4, 27
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:232
-; SI-NEXT: v_writelane_b32 v61, s4, 27
+; SI-NEXT: v_writelane_b32 v61, s4, 28
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:228
-; SI-NEXT: v_writelane_b32 v61, s4, 28
+; SI-NEXT: v_writelane_b32 v61, s4, 29
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:224
-; SI-NEXT: v_writelane_b32 v61, s4, 29
+; SI-NEXT: v_writelane_b32 v61, s4, 30
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:220
-; SI-NEXT: v_writelane_b32 v61, s4, 30
+; SI-NEXT: v_writelane_b32 v61, s4, 31
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:216
-; SI-NEXT: v_writelane_b32 v61, s4, 31
+; SI-NEXT: v_writelane_b32 v61, s4, 32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212
-; SI-NEXT: v_writelane_b32 v61, s4, 32
+; SI-NEXT: v_writelane_b32 v61, s4, 33
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s16, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:208
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:204
-; SI-NEXT: v_writelane_b32 v61, s4, 33
+; SI-NEXT: v_writelane_b32 v61, s4, 34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s89, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:200
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:196
-; SI-NEXT: v_writelane_b32 v61, s4, 34
+; SI-NEXT: v_writelane_b32 v61, s4, 35
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s73, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:192
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s4, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:188
-; SI-NEXT: v_writelane_b32 v61, s4, 35
+; SI-NEXT: v_writelane_b32 v61, s4, 36
+; SI-NEXT: v_writelane_b32 v61, s65, 37
+; SI-NEXT: v_writelane_b32 v61, s10, 38
+; SI-NEXT: v_writelane_b32 v61, s52, 39
+; SI-NEXT: v_writelane_b32 v61, s54, 40
+; SI-NEXT: v_writelane_b32 v61, s61, 41
+; SI-NEXT: v_writelane_b32 v61, s60, 42
+; SI-NEXT: v_writelane_b32 v61, s30, 43
+; SI-NEXT: v_writelane_b32 v61, s22, 44
+; SI-NEXT: v_writelane_b32 v61, s62, 45
+; SI-NEXT: v_writelane_b32 v61, s63, 46
+; SI-NEXT: v_writelane_b32 v61, s55, 47
+; SI-NEXT: v_writelane_b32 v61, s95, 48
+; SI-NEXT: v_writelane_b32 v61, s31, 49
+; SI-NEXT: v_writelane_b32 v61, s24, 50
+; SI-NEXT: v_writelane_b32 v61, s49, 51
+; SI-NEXT: v_writelane_b32 v61, s36, 52
+; SI-NEXT: v_writelane_b32 v61, s8, 53
+; SI-NEXT: v_writelane_b32 v61, s27, 54
+; SI-NEXT: v_writelane_b32 v61, s9, 55
+; SI-NEXT: v_writelane_b32 v61, s79, 56
+; SI-NEXT: v_writelane_b32 v61, s13, 57
+; SI-NEXT: v_writelane_b32 v61, s15, 58
+; SI-NEXT: v_writelane_b32 v61, s42, 59
+; SI-NEXT: v_writelane_b32 v61, s43, 60
+; SI-NEXT: v_writelane_b32 v61, s44, 61
+; SI-NEXT: v_writelane_b32 v61, s45, 62
+; SI-NEXT: v_writelane_b32 v61, s74, 63
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s72, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:184
@@ -176134,19 +176785,19 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s97, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:164
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s7, v31
+; SI-NEXT: v_readfirstlane_b32 s25, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:160
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s11, v31
+; SI-NEXT: v_readfirstlane_b32 s7, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s41, v31
+; SI-NEXT: v_readfirstlane_b32 s14, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:152
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s47, v31
+; SI-NEXT: v_readfirstlane_b32 s46, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:148
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s58, v31
+; SI-NEXT: v_readfirstlane_b32 s57, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:144
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s76, v31
@@ -176155,176 +176806,147 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s29, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:136
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s4, v31
+; SI-NEXT: v_readfirstlane_b32 s75, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:132
-; SI-NEXT: v_writelane_b32 v61, s4, 36
-; SI-NEXT: v_writelane_b32 v61, s54, 37
-; SI-NEXT: v_writelane_b32 v61, s10, 38
-; SI-NEXT: v_writelane_b32 v61, s67, 39
-; SI-NEXT: v_writelane_b32 v61, s18, 40
-; SI-NEXT: v_writelane_b32 v61, s61, 41
-; SI-NEXT: v_writelane_b32 v61, s60, 42
-; SI-NEXT: v_writelane_b32 v61, s35, 43
-; SI-NEXT: v_writelane_b32 v61, s22, 44
-; SI-NEXT: v_writelane_b32 v61, s62, 45
-; SI-NEXT: v_writelane_b32 v61, s63, 46
-; SI-NEXT: v_writelane_b32 v61, s39, 47
-; SI-NEXT: v_writelane_b32 v61, s99, 48
-; SI-NEXT: v_writelane_b32 v61, s95, 49
-; SI-NEXT: v_writelane_b32 v61, s31, 50
-; SI-NEXT: v_writelane_b32 v61, s24, 51
-; SI-NEXT: v_writelane_b32 v61, s38, 52
-; SI-NEXT: v_writelane_b32 v61, s36, 53
-; SI-NEXT: v_writelane_b32 v61, s8, 54
-; SI-NEXT: v_writelane_b32 v61, s27, 55
-; SI-NEXT: v_writelane_b32 v61, s9, 56
-; SI-NEXT: v_writelane_b32 v61, s79, 57
-; SI-NEXT: v_writelane_b32 v61, s13, 58
-; SI-NEXT: v_writelane_b32 v61, s15, 59
-; SI-NEXT: v_writelane_b32 v61, s42, 60
-; SI-NEXT: v_writelane_b32 v61, s43, 61
-; SI-NEXT: v_writelane_b32 v61, s44, 62
-; SI-NEXT: v_writelane_b32 v61, s45, 63
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s37, v31
+; SI-NEXT: v_readfirstlane_b32 s39, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:128
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s50, v31
+; SI-NEXT: v_readfirstlane_b32 s67, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:124
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s48, v31
+; SI-NEXT: v_readfirstlane_b32 s35, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:120
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s19, v31
+; SI-NEXT: v_readfirstlane_b32 s18, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:116
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s64, v31
+; SI-NEXT: v_readfirstlane_b32 s50, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:112
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s17, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:108
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s65, v31
+; SI-NEXT: v_readfirstlane_b32 s70, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:104
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s71, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:100
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s70, v31
+; SI-NEXT: v_readfirstlane_b32 s38, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:96
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s83, v31
+; SI-NEXT: v_readfirstlane_b32 s80, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:92
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s49, v31
+; SI-NEXT: v_readfirstlane_b32 s83, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:88
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s80, v31
+; SI-NEXT: v_readfirstlane_b32 s82, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:84
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s82, v31
+; SI-NEXT: v_readfirstlane_b32 s84, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:80
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s87, v31
+; SI-NEXT: v_readfirstlane_b32 s86, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:76
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s84, v31
+; SI-NEXT: v_readfirstlane_b32 s87, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:72
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s51, v31
+; SI-NEXT: v_readfirstlane_b32 s96, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:68
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s86, v31
+; SI-NEXT: v_readfirstlane_b32 s51, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:64
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s94, v31
+; SI-NEXT: v_readfirstlane_b32 s53, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s96, v31
+; SI-NEXT: v_readfirstlane_b32 s34, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:56
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s68, v31
+; SI-NEXT: v_readfirstlane_b32 s69, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:52
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s34, v31
+; SI-NEXT: v_readfirstlane_b32 s94, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:48
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s77, v31
+; SI-NEXT: v_readfirstlane_b32 s78, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:44
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s66, v31
+; SI-NEXT: v_readfirstlane_b32 s68, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:40
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s78, v31
+; SI-NEXT: v_readfirstlane_b32 s37, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:36
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s53, v31
+; SI-NEXT: v_readfirstlane_b32 s66, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:32
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s69, v31
+; SI-NEXT: v_readfirstlane_b32 s77, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:28
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s30, v31
+; SI-NEXT: v_readfirstlane_b32 s48, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:24
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s52, v31
+; SI-NEXT: v_readfirstlane_b32 s64, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:20
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s75, v31
+; SI-NEXT: v_readfirstlane_b32 s26, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:16
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s23, v31
+; SI-NEXT: v_readfirstlane_b32 s19, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:12
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s28, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:8
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s26, v31
+; SI-NEXT: v_readfirstlane_b32 s23, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:4
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_readfirstlane_b32 s25, v31
+; SI-NEXT: v_readfirstlane_b32 s59, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32
-; SI-NEXT: v_writelane_b32 v62, s25, 8
-; SI-NEXT: v_writelane_b32 v62, s28, 9
+; SI-NEXT: v_writelane_b32 v62, s59, 7
+; SI-NEXT: v_writelane_b32 v62, s28, 8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s92, v31
-; SI-NEXT: v_writelane_b32 v62, s92, 10
-; SI-NEXT: v_writelane_b32 v62, s75, 11
-; SI-NEXT: v_writelane_b32 v62, s26, 12
-; SI-NEXT: v_writelane_b32 v62, s30, 13
-; SI-NEXT: v_writelane_b32 v62, s23, 14
-; SI-NEXT: v_writelane_b32 v62, s52, 15
-; SI-NEXT: v_writelane_b32 v62, s64, 16
-; SI-NEXT: v_writelane_b32 v62, s17, 17
-; SI-NEXT: v_writelane_b32 v62, s65, 18
-; SI-NEXT: v_writelane_b32 v62, s70, 19
-; SI-NEXT: v_writelane_b32 v62, s71, 20
-; SI-NEXT: v_writelane_b32 v62, s49, 21
-; SI-NEXT: v_writelane_b32 v62, s83, 22
-; SI-NEXT: v_writelane_b32 v62, s80, 23
-; SI-NEXT: v_writelane_b32 v62, s82, 24
-; SI-NEXT: v_writelane_b32 v62, s84, 25
-; SI-NEXT: v_writelane_b32 v62, s87, 26
-; SI-NEXT: v_writelane_b32 v62, s86, 27
-; SI-NEXT: v_writelane_b32 v62, s51, 28
-; SI-NEXT: v_writelane_b32 v62, s96, 29
-; SI-NEXT: v_writelane_b32 v62, s34, 30
-; SI-NEXT: v_writelane_b32 v62, s94, 31
-; SI-NEXT: v_writelane_b32 v62, s53, 32
-; SI-NEXT: v_writelane_b32 v62, s66, 33
-; SI-NEXT: v_writelane_b32 v62, s68, 34
-; SI-NEXT: v_writelane_b32 v62, s69, 35
-; SI-NEXT: v_writelane_b32 v62, s77, 36
-; SI-NEXT: v_writelane_b32 v62, s78, 37
+; SI-NEXT: v_writelane_b32 v62, s92, 9
+; SI-NEXT: v_writelane_b32 v62, s26, 10
+; SI-NEXT: v_writelane_b32 v62, s23, 11
+; SI-NEXT: v_writelane_b32 v62, s48, 12
+; SI-NEXT: v_writelane_b32 v62, s19, 13
+; SI-NEXT: v_writelane_b32 v62, s64, 14
+; SI-NEXT: v_writelane_b32 v62, s50, 15
+; SI-NEXT: v_writelane_b32 v62, s17, 16
+; SI-NEXT: v_writelane_b32 v62, s70, 17
+; SI-NEXT: v_writelane_b32 v62, s38, 18
+; SI-NEXT: v_writelane_b32 v62, s71, 19
+; SI-NEXT: v_writelane_b32 v62, s83, 20
+; SI-NEXT: v_writelane_b32 v62, s80, 21
+; SI-NEXT: v_writelane_b32 v62, s82, 22
+; SI-NEXT: v_writelane_b32 v62, s84, 23
+; SI-NEXT: v_writelane_b32 v62, s87, 24
+; SI-NEXT: v_writelane_b32 v62, s86, 25
+; SI-NEXT: v_writelane_b32 v62, s51, 26
+; SI-NEXT: v_writelane_b32 v62, s96, 27
+; SI-NEXT: v_writelane_b32 v62, s34, 28
+; SI-NEXT: v_writelane_b32 v62, s94, 29
+; SI-NEXT: v_writelane_b32 v62, s53, 30
+; SI-NEXT: v_writelane_b32 v62, s66, 31
+; SI-NEXT: v_writelane_b32 v62, s68, 32
+; SI-NEXT: v_writelane_b32 v62, s69, 33
+; SI-NEXT: v_writelane_b32 v62, s77, 34
+; SI-NEXT: v_writelane_b32 v62, s78, 35
+; SI-NEXT: v_writelane_b32 v62, s37, 36
; SI-NEXT: s_cbranch_scc0 .LBB93_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s10, 0xff
-; SI-NEXT: s_lshl_b32 s5, s54, 8
+; SI-NEXT: s_lshl_b32 s5, s65, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
-; SI-NEXT: s_and_b32 s4, s18, 0xff
-; SI-NEXT: s_lshl_b32 s5, s67, 8
+; SI-NEXT: s_and_b32 s4, s54, 0xff
+; SI-NEXT: s_lshl_b32 s5, s52, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_and_b32 s4, s60, 0xff
@@ -176332,7 +176954,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_and_b32 s4, s22, 0xff
-; SI-NEXT: s_lshl_b32 s5, s35, 8
+; SI-NEXT: s_lshl_b32 s5, s30, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -176343,8 +176965,9 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
+; SI-NEXT: v_readlane_b32 s4, v61, 3
; SI-NEXT: v_readlane_b32 s5, v61, 2
-; SI-NEXT: s_and_b32 s4, s39, 0xff
+; SI-NEXT: s_and_b32 s4, s4, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
@@ -176356,8 +176979,8 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_lshl_b32 s5, s5, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
-; SI-NEXT: s_and_b32 s4, s99, 0xff
-; SI-NEXT: s_lshl_b32 s5, s55, 8
+; SI-NEXT: s_and_b32 s4, s55, 0xff
+; SI-NEXT: s_lshl_b32 s5, s99, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v10, s4
; SI-NEXT: s_and_b32 s4, s95, 0xff
@@ -176377,7 +177000,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v13, s4
; SI-NEXT: s_and_b32 s4, s36, 0xff
-; SI-NEXT: s_lshl_b32 s5, s38, 8
+; SI-NEXT: s_lshl_b32 s5, s49, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v12, s4
; SI-NEXT: s_and_b32 s4, s27, 0xff
@@ -176404,101 +177027,100 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_lshl_b32 s5, s74, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v18, s4
-; SI-NEXT: s_and_b32 s4, s14, 0xff
-; SI-NEXT: s_lshl_b32 s5, s12, 8
+; SI-NEXT: s_and_b32 s4, s12, 0xff
+; SI-NEXT: s_lshl_b32 s5, s11, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v21, s4
-; SI-NEXT: s_and_b32 s4, s56, 0xff
-; SI-NEXT: s_lshl_b32 s5, s46, 8
+; SI-NEXT: s_and_b32 s4, s47, 0xff
+; SI-NEXT: s_lshl_b32 s5, s41, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v20, s4
-; SI-NEXT: s_and_b32 s4, s59, 0xff
-; SI-NEXT: s_lshl_b32 s5, s57, 8
+; SI-NEXT: s_and_b32 s4, s58, 0xff
+; SI-NEXT: s_lshl_b32 s5, s56, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v23, s4
; SI-NEXT: s_and_b32 s4, s92, 0xff
-; SI-NEXT: s_lshl_b32 s5, s25, 8
+; SI-NEXT: s_lshl_b32 s5, s59, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v22, s4
-; SI-NEXT: s_and_b32 s4, s26, 0xff
+; SI-NEXT: s_and_b32 s4, s23, 0xff
; SI-NEXT: s_lshl_b32 s5, s28, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v25, s4
-; SI-NEXT: s_and_b32 s4, s23, 0xff
-; SI-NEXT: s_lshl_b32 s5, s75, 8
+; SI-NEXT: s_and_b32 s4, s19, 0xff
+; SI-NEXT: s_lshl_b32 s5, s26, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v24, s4
-; SI-NEXT: s_and_b32 s4, s52, 0xff
-; SI-NEXT: s_lshl_b32 s5, s30, 8
+; SI-NEXT: s_and_b32 s4, s64, 0xff
+; SI-NEXT: s_lshl_b32 s5, s48, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v27, s4
-; SI-NEXT: s_and_b32 s4, s69, 0xff
-; SI-NEXT: s_lshl_b32 s5, s53, 8
+; SI-NEXT: s_and_b32 s4, s77, 0xff
+; SI-NEXT: s_lshl_b32 s5, s66, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v26, s4
-; SI-NEXT: s_and_b32 s4, s78, 0xff
-; SI-NEXT: s_lshl_b32 s5, s66, 8
+; SI-NEXT: s_and_b32 s4, s37, 0xff
+; SI-NEXT: s_lshl_b32 s5, s68, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v29, s4
-; SI-NEXT: s_and_b32 s4, s77, 0xff
-; SI-NEXT: s_lshl_b32 s5, s34, 8
+; SI-NEXT: s_and_b32 s4, s78, 0xff
+; SI-NEXT: s_lshl_b32 s5, s94, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v28, s4
-; SI-NEXT: s_and_b32 s4, s68, 0xff
-; SI-NEXT: s_lshl_b32 s5, s96, 8
+; SI-NEXT: s_and_b32 s4, s69, 0xff
+; SI-NEXT: s_lshl_b32 s5, s34, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v31, s4
-; SI-NEXT: s_and_b32 s4, s94, 0xff
-; SI-NEXT: s_lshl_b32 s5, s86, 8
+; SI-NEXT: s_and_b32 s4, s53, 0xff
+; SI-NEXT: s_lshl_b32 s5, s51, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v30, s4
-; SI-NEXT: s_and_b32 s4, s51, 0xff
-; SI-NEXT: s_lshl_b32 s5, s84, 8
+; SI-NEXT: s_and_b32 s4, s96, 0xff
+; SI-NEXT: s_lshl_b32 s5, s87, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v33, s4
-; SI-NEXT: s_and_b32 s4, s87, 0xff
-; SI-NEXT: s_lshl_b32 s5, s82, 8
+; SI-NEXT: s_and_b32 s4, s86, 0xff
+; SI-NEXT: s_lshl_b32 s5, s84, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v32, s4
-; SI-NEXT: s_and_b32 s4, s80, 0xff
-; SI-NEXT: s_lshl_b32 s5, s49, 8
+; SI-NEXT: s_and_b32 s4, s82, 0xff
+; SI-NEXT: s_lshl_b32 s5, s83, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v35, s4
-; SI-NEXT: s_and_b32 s4, s83, 0xff
-; SI-NEXT: s_lshl_b32 s5, s70, 8
+; SI-NEXT: s_and_b32 s4, s80, 0xff
+; SI-NEXT: s_lshl_b32 s5, s38, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v34, s4
; SI-NEXT: s_and_b32 s4, s71, 0xff
-; SI-NEXT: s_lshl_b32 s5, s65, 8
+; SI-NEXT: s_lshl_b32 s5, s70, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v37, s4
; SI-NEXT: s_and_b32 s4, s17, 0xff
-; SI-NEXT: s_lshl_b32 s5, s64, 8
+; SI-NEXT: s_lshl_b32 s5, s50, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v36, s4
-; SI-NEXT: s_and_b32 s4, s19, 0xff
-; SI-NEXT: s_lshl_b32 s5, s48, 8
+; SI-NEXT: s_and_b32 s4, s18, 0xff
+; SI-NEXT: s_lshl_b32 s5, s35, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v39, s4
-; SI-NEXT: s_and_b32 s4, s50, 0xff
-; SI-NEXT: s_lshl_b32 s5, s37, 8
+; SI-NEXT: s_and_b32 s4, s67, 0xff
+; SI-NEXT: s_lshl_b32 s5, s39, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s8, v61, 36
; SI-NEXT: v_cvt_f32_f16_e32 v38, s4
-; SI-NEXT: s_and_b32 s4, s8, 0xff
+; SI-NEXT: s_and_b32 s4, s75, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v49, s4
; SI-NEXT: s_and_b32 s4, s76, 0xff
-; SI-NEXT: s_lshl_b32 s5, s58, 8
+; SI-NEXT: s_lshl_b32 s5, s57, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v48, s4
-; SI-NEXT: s_and_b32 s4, s47, 0xff
-; SI-NEXT: s_lshl_b32 s5, s41, 8
+; SI-NEXT: s_and_b32 s4, s46, 0xff
+; SI-NEXT: s_lshl_b32 s5, s14, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v51, s4
-; SI-NEXT: s_and_b32 s4, s11, 0xff
-; SI-NEXT: s_lshl_b32 s5, s7, 8
+; SI-NEXT: s_and_b32 s4, s7, 0xff
+; SI-NEXT: s_lshl_b32 s5, s25, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v50, s4
; SI-NEXT: s_and_b32 s4, s97, 0xff
@@ -176512,126 +177134,126 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_and_b32 s4, s40, 0xff
; SI-NEXT: s_lshl_b32 s5, s72, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s69, v61, 35
+; SI-NEXT: v_readlane_b32 s69, v61, 36
; SI-NEXT: v_cvt_f32_f16_e32 v55, s4
; SI-NEXT: s_and_b32 s4, s69, 0xff
; SI-NEXT: s_lshl_b32 s5, s73, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s68, v61, 34
+; SI-NEXT: v_readlane_b32 s68, v61, 35
; SI-NEXT: v_cvt_f32_f16_e32 v54, s4
; SI-NEXT: s_and_b32 s4, s68, 0xff
; SI-NEXT: s_lshl_b32 s5, s89, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s66, v61, 33
+; SI-NEXT: v_readlane_b32 s66, v61, 34
; SI-NEXT: v_cvt_f32_f16_e32 v41, s4
; SI-NEXT: s_and_b32 s4, s66, 0xff
; SI-NEXT: s_lshl_b32 s5, s16, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s53, v61, 32
-; SI-NEXT: v_readlane_b32 s94, v61, 31
+; SI-NEXT: v_readlane_b32 s53, v61, 33
+; SI-NEXT: v_readlane_b32 s94, v61, 32
; SI-NEXT: v_cvt_f32_f16_e32 v40, s4
; SI-NEXT: s_and_b32 s4, s53, 0xff
; SI-NEXT: s_lshl_b32 s5, s94, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s34, v61, 30
-; SI-NEXT: v_readlane_b32 s96, v61, 29
+; SI-NEXT: v_readlane_b32 s34, v61, 31
+; SI-NEXT: v_readlane_b32 s96, v61, 30
; SI-NEXT: v_cvt_f32_f16_e32 v43, s4
; SI-NEXT: s_and_b32 s4, s34, 0xff
; SI-NEXT: s_lshl_b32 s5, s96, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s51, v61, 28
-; SI-NEXT: v_readlane_b32 s86, v61, 27
+; SI-NEXT: v_readlane_b32 s51, v61, 29
+; SI-NEXT: v_readlane_b32 s86, v61, 28
; SI-NEXT: v_cvt_f32_f16_e32 v42, s4
; SI-NEXT: s_and_b32 s4, s51, 0xff
; SI-NEXT: s_lshl_b32 s5, s86, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s87, v61, 26
-; SI-NEXT: v_readlane_b32 s84, v61, 25
+; SI-NEXT: v_readlane_b32 s87, v61, 27
+; SI-NEXT: v_readlane_b32 s84, v61, 26
; SI-NEXT: v_cvt_f32_f16_e32 v45, s4
; SI-NEXT: s_and_b32 s4, s87, 0xff
; SI-NEXT: s_lshl_b32 s5, s84, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s82, v61, 24
-; SI-NEXT: v_readlane_b32 s80, v61, 23
+; SI-NEXT: v_readlane_b32 s82, v61, 25
+; SI-NEXT: v_readlane_b32 s80, v61, 24
; SI-NEXT: v_cvt_f32_f16_e32 v44, s4
; SI-NEXT: s_and_b32 s4, s82, 0xff
; SI-NEXT: s_lshl_b32 s5, s80, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s83, v61, 22
-; SI-NEXT: v_readlane_b32 s49, v61, 21
+; SI-NEXT: v_readlane_b32 s83, v61, 23
+; SI-NEXT: v_readlane_b32 s71, v61, 22
; SI-NEXT: v_cvt_f32_f16_e32 v47, s4
; SI-NEXT: s_and_b32 s4, s83, 0xff
-; SI-NEXT: s_lshl_b32 s5, s49, 8
+; SI-NEXT: s_lshl_b32 s5, s71, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s71, v61, 20
-; SI-NEXT: v_readlane_b32 s70, v61, 19
+; SI-NEXT: v_readlane_b32 s38, v61, 21
+; SI-NEXT: v_readlane_b32 s70, v61, 20
; SI-NEXT: v_cvt_f32_f16_e32 v46, s4
-; SI-NEXT: s_and_b32 s4, s71, 0xff
+; SI-NEXT: s_and_b32 s4, s38, 0xff
; SI-NEXT: s_lshl_b32 s5, s70, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s65, v61, 18
-; SI-NEXT: v_readlane_b32 s54, v61, 17
+; SI-NEXT: v_readlane_b32 s65, v61, 19
+; SI-NEXT: v_readlane_b32 s54, v61, 18
; SI-NEXT: v_cvt_f32_f16_e32 v57, s4
; SI-NEXT: s_and_b32 s4, s65, 0xff
; SI-NEXT: s_lshl_b32 s5, s54, 8
-; SI-NEXT: s_mov_b32 s17, s19
-; SI-NEXT: s_mov_b32 s19, s50
+; SI-NEXT: s_mov_b32 s17, s18
+; SI-NEXT: s_mov_b32 s18, s67
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s67, v61, 16
-; SI-NEXT: v_readlane_b32 s50, v61, 15
+; SI-NEXT: v_readlane_b32 s67, v61, 17
+; SI-NEXT: v_readlane_b32 s50, v61, 16
; SI-NEXT: v_cvt_f32_f16_e32 v56, s4
; SI-NEXT: s_and_b32 s4, s67, 0xff
; SI-NEXT: s_lshl_b32 s5, s50, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s64, v61, 14
-; SI-NEXT: v_readlane_b32 s52, v61, 13
+; SI-NEXT: v_readlane_b32 s64, v61, 15
+; SI-NEXT: v_readlane_b32 s52, v61, 14
; SI-NEXT: v_cvt_f32_f16_e32 v59, s4
; SI-NEXT: s_and_b32 s4, s64, 0xff
; SI-NEXT: s_lshl_b32 s5, s52, 8
-; SI-NEXT: s_mov_b32 s23, s48
+; SI-NEXT: s_mov_b32 s19, s35
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s35, v61, 12
-; SI-NEXT: v_readlane_b32 s48, v61, 11
+; SI-NEXT: v_readlane_b32 s35, v61, 13
+; SI-NEXT: v_readlane_b32 s48, v61, 12
; SI-NEXT: v_cvt_f32_f16_e32 v58, s4
; SI-NEXT: s_and_b32 s4, s35, 0xff
; SI-NEXT: s_lshl_b32 s5, s48, 8
+; SI-NEXT: s_mov_b32 s23, s39
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s30, v61, 10
-; SI-NEXT: v_readlane_b32 s39, v61, 9
+; SI-NEXT: v_readlane_b32 s30, v61, 11
+; SI-NEXT: v_readlane_b32 s39, v61, 10
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_and_b32 s4, s30, 0xff
; SI-NEXT: s_lshl_b32 s5, s39, 8
-; SI-NEXT: s_mov_b32 s26, s37
+; SI-NEXT: s_mov_b32 s26, s75
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s37, v61, 8
-; SI-NEXT: v_readlane_b32 s75, v61, 7
+; SI-NEXT: v_readlane_b32 s37, v61, 9
+; SI-NEXT: v_readlane_b32 s75, v61, 8
; SI-NEXT: v_cvt_f32_f16_e32 v60, s4
; SI-NEXT: s_and_b32 s4, s37, 0xff
; SI-NEXT: s_lshl_b32 s5, s75, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s92, v61, 6
-; SI-NEXT: v_readlane_b32 s77, v61, 5
+; SI-NEXT: v_readlane_b32 s92, v61, 7
+; SI-NEXT: v_readlane_b32 s77, v61, 6
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_and_b32 s4, s92, 0xff
; SI-NEXT: s_lshl_b32 s5, s77, 8
; SI-NEXT: s_mov_b32 s28, s29
; SI-NEXT: s_mov_b32 s29, s76
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_readlane_b32 s78, v61, 4
-; SI-NEXT: v_readlane_b32 s76, v61, 3
+; SI-NEXT: v_readlane_b32 s78, v61, 5
+; SI-NEXT: v_readlane_b32 s76, v61, 4
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
; SI-NEXT: s_and_b32 s4, s78, 0xff
; SI-NEXT: s_lshl_b32 s5, s76, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: s_mov_b32 s99, s55
+; SI-NEXT: s_mov_b32 s55, s99
; SI-NEXT: s_mov_b32 s20, s88
; SI-NEXT: s_mov_b32 s24, s98
-; SI-NEXT: s_mov_b32 s59, s58
-; SI-NEXT: s_mov_b32 s56, s47
-; SI-NEXT: s_mov_b32 s46, s41
-; SI-NEXT: s_mov_b32 s12, s11
+; SI-NEXT: s_mov_b32 s58, s57
+; SI-NEXT: s_mov_b32 s47, s46
+; SI-NEXT: s_mov_b32 s41, s14
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_mov_b32 s7, s97
; SI-NEXT: s_mov_b32 s97, s81
@@ -176641,7 +177263,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_mov_b32 s45, s73
; SI-NEXT: s_mov_b32 s15, s89
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_mov_b32 s55, s93
+; SI-NEXT: s_mov_b32 s99, s93
; SI-NEXT: s_mov_b32 s95, s91
; SI-NEXT: s_mov_b32 s31, s90
; SI-NEXT: s_cbranch_execnz .LBB93_3
@@ -176678,13 +177300,13 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_and_b32 vcc_hi, vcc_hi, 0xff
; SI-NEXT: s_lshl_b32 s72, s54, 8
; SI-NEXT: s_or_b32 s72, s72, vcc_hi
-; SI-NEXT: s_add_i32 vcc_hi, s71, 3
+; SI-NEXT: s_add_i32 vcc_hi, s38, 3
; SI-NEXT: s_and_b32 vcc_hi, vcc_hi, 0xff
; SI-NEXT: s_lshl_b32 s73, s70, 8
; SI-NEXT: s_or_b32 s73, s73, vcc_hi
; SI-NEXT: s_add_i32 vcc_hi, s83, 3
; SI-NEXT: s_and_b32 vcc_hi, vcc_hi, 0xff
-; SI-NEXT: s_lshl_b32 s74, s49, 8
+; SI-NEXT: s_lshl_b32 s74, s71, 8
; SI-NEXT: s_or_b32 s74, s74, vcc_hi
; SI-NEXT: s_add_i32 vcc_hi, s82, 3
; SI-NEXT: s_and_b32 vcc_hi, vcc_hi, 0xff
@@ -176729,194 +177351,194 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_or_b32 s39, s52, s39
; SI-NEXT: s_and_b32 s52, s81, 0xff
; SI-NEXT: s_lshl_b32 s53, s97, 8
-; SI-NEXT: s_add_i32 s85, s12, 3
+; SI-NEXT: s_add_i32 s85, s11, 3
; SI-NEXT: s_or_b32 s52, s53, s52
; SI-NEXT: s_and_b32 s53, s85, 0xff
-; SI-NEXT: s_lshl_b32 s64, s11, 8
-; SI-NEXT: s_add_i32 s97, s56, 3
+; SI-NEXT: s_lshl_b32 s64, s25, 8
+; SI-NEXT: s_add_i32 s97, s47, 3
; SI-NEXT: s_or_b32 s53, s64, s53
; SI-NEXT: s_and_b32 s64, s97, 0xff
-; SI-NEXT: s_lshl_b32 s66, s46, 8
+; SI-NEXT: s_lshl_b32 s66, s41, 8
; SI-NEXT: s_add_i32 s21, s29, 3
; SI-NEXT: s_or_b32 s64, s66, s64
; SI-NEXT: s_and_b32 s21, s21, 0xff
-; SI-NEXT: s_lshl_b32 s66, s59, 8
-; SI-NEXT: s_add_i32 s25, s8, 3
+; SI-NEXT: s_lshl_b32 s66, s58, 8
+; SI-NEXT: s_add_i32 s25, s26, 3
; SI-NEXT: s_or_b32 s66, s66, s21
; SI-NEXT: s_and_b32 s21, s25, 0xff
; SI-NEXT: s_lshl_b32 s6, s28, 8
-; SI-NEXT: s_add_i32 s29, s19, 3
+; SI-NEXT: s_add_i32 s29, s18, 3
; SI-NEXT: s_or_b32 s67, s6, s21
; SI-NEXT: s_and_b32 s6, s29, 0xff
-; SI-NEXT: s_lshl_b32 s18, s26, 8
+; SI-NEXT: s_lshl_b32 s18, s23, 8
; SI-NEXT: s_add_i32 s28, s17, 3
; SI-NEXT: s_or_b32 s68, s18, s6
; SI-NEXT: s_and_b32 s6, s28, 0xff
-; SI-NEXT: s_lshl_b32 s18, s23, 8
+; SI-NEXT: s_lshl_b32 s18, s19, 8
; SI-NEXT: s_or_b32 s69, s18, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 17
+; SI-NEXT: v_readlane_b32 s6, v62, 16
; SI-NEXT: s_add_i32 s7, s6, 3
-; SI-NEXT: v_readlane_b32 s16, v62, 15
+; SI-NEXT: v_readlane_b32 s16, v62, 14
; SI-NEXT: s_and_b32 s6, s7, 0xff
-; SI-NEXT: v_readlane_b32 s7, v62, 16
+; SI-NEXT: v_readlane_b32 s7, v62, 15
; SI-NEXT: s_add_i32 s27, s16, 3
-; SI-NEXT: v_readlane_b32 s16, v62, 13
+; SI-NEXT: v_readlane_b32 s16, v62, 12
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_lshl_b32 s23, s16, 8
-; SI-NEXT: v_readlane_b32 s16, v62, 14
+; SI-NEXT: v_readlane_b32 s16, v62, 13
; SI-NEXT: s_mov_b32 s91, s24
; SI-NEXT: s_or_b32 s70, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 20
+; SI-NEXT: v_readlane_b32 s6, v62, 19
; SI-NEXT: s_add_i32 s24, s16, 3
-; SI-NEXT: v_readlane_b32 s16, v62, 11
+; SI-NEXT: v_readlane_b32 s16, v62, 10
; SI-NEXT: s_add_i32 s11, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 18
+; SI-NEXT: v_readlane_b32 s7, v62, 17
; SI-NEXT: s_lshl_b32 s19, s16, 8
-; SI-NEXT: v_readlane_b32 s16, v62, 12
+; SI-NEXT: v_readlane_b32 s16, v62, 11
; SI-NEXT: s_mov_b32 s90, s20
; SI-NEXT: s_and_b32 s6, s11, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_add_i32 s20, s16, 3
-; SI-NEXT: v_readlane_b32 s16, v62, 9
+; SI-NEXT: v_readlane_b32 s16, v62, 8
; SI-NEXT: s_or_b32 s71, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 22
+; SI-NEXT: v_readlane_b32 s6, v62, 21
; SI-NEXT: s_and_b32 s20, s20, 0xff
; SI-NEXT: s_lshl_b32 s17, s16, 8
-; SI-NEXT: v_readlane_b32 s16, v62, 10
+; SI-NEXT: v_readlane_b32 s16, v62, 9
; SI-NEXT: s_add_i32 s12, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 19
+; SI-NEXT: v_readlane_b32 s7, v62, 18
; SI-NEXT: s_or_b32 s17, s17, s20
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: v_readlane_b32 s20, v62, 8
+; SI-NEXT: v_readlane_b32 s20, v62, 7
; SI-NEXT: s_and_b32 s6, s12, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s16, s16, 0xff
; SI-NEXT: s_lshl_b32 s20, s20, 8
; SI-NEXT: s_or_b32 s81, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 23
+; SI-NEXT: v_readlane_b32 s6, v62, 22
; SI-NEXT: s_and_b32 s24, s24, 0xff
; SI-NEXT: s_or_b32 s16, s20, s16
-; SI-NEXT: v_readlane_b32 s20, v62, 7
+; SI-NEXT: v_readlane_b32 s20, v62, 6
; SI-NEXT: s_add_i32 s14, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 21
+; SI-NEXT: v_readlane_b32 s7, v62, 20
; SI-NEXT: s_or_b32 s19, s19, s24
; SI-NEXT: s_add_i32 s98, s20, 3
-; SI-NEXT: v_readlane_b32 s24, v62, 6
+; SI-NEXT: v_readlane_b32 s24, v62, 5
; SI-NEXT: s_and_b32 s6, s14, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s20, s98, 0xff
; SI-NEXT: s_lshl_b32 s24, s24, 8
; SI-NEXT: s_or_b32 s83, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 26
+; SI-NEXT: v_readlane_b32 s6, v62, 25
; SI-NEXT: s_and_b32 s27, s27, 0xff
; SI-NEXT: s_or_b32 s20, s24, s20
-; SI-NEXT: v_readlane_b32 s24, v62, 5
+; SI-NEXT: v_readlane_b32 s24, v62, 4
; SI-NEXT: s_add_i32 s41, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 24
+; SI-NEXT: v_readlane_b32 s7, v62, 23
; SI-NEXT: s_or_b32 s23, s23, s27
; SI-NEXT: s_add_i32 s86, s24, 3
-; SI-NEXT: v_readlane_b32 s27, v62, 4
+; SI-NEXT: v_readlane_b32 s27, v62, 3
; SI-NEXT: s_and_b32 s6, s41, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s24, s86, 0xff
; SI-NEXT: s_lshl_b32 s27, s27, 8
; SI-NEXT: s_or_b32 s85, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 28
+; SI-NEXT: v_readlane_b32 s6, v62, 27
; SI-NEXT: s_or_b32 s24, s27, s24
-; SI-NEXT: v_readlane_b32 s27, v62, 3
+; SI-NEXT: v_readlane_b32 s27, v62, 2
; SI-NEXT: s_add_i32 s46, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 25
+; SI-NEXT: v_readlane_b32 s7, v62, 24
; SI-NEXT: s_add_i32 s12, s73, 0x300
; SI-NEXT: s_add_i32 s82, s27, 3
-; SI-NEXT: v_readlane_b32 s73, v62, 2
+; SI-NEXT: v_readlane_b32 s73, v62, 1
; SI-NEXT: s_and_b32 s6, s46, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s27, s82, 0xff
; SI-NEXT: s_lshl_b32 s73, s73, 8
; SI-NEXT: s_or_b32 s96, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 31
+; SI-NEXT: v_readlane_b32 s6, v62, 30
; SI-NEXT: s_or_b32 s27, s73, s27
-; SI-NEXT: v_readlane_b32 s73, v62, 1
+; SI-NEXT: v_readlane_b32 s73, v62, 0
; SI-NEXT: s_add_i32 s47, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 27
+; SI-NEXT: v_readlane_b32 s7, v62, 26
; SI-NEXT: s_add_i32 s13, s74, 0x300
; SI-NEXT: s_add_i32 s65, s73, 3
-; SI-NEXT: v_readlane_b32 s74, v62, 0
+; SI-NEXT: v_readlane_b32 s74, v61, 63
; SI-NEXT: s_and_b32 s6, s47, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s73, s65, 0xff
; SI-NEXT: s_lshl_b32 s74, s74, 8
; SI-NEXT: s_or_b32 s97, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 34
+; SI-NEXT: v_readlane_b32 s6, v62, 33
; SI-NEXT: s_or_b32 s73, s74, s73
-; SI-NEXT: v_readlane_b32 s74, v61, 63
+; SI-NEXT: v_readlane_b32 s74, v61, 62
; SI-NEXT: s_add_i32 s56, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 29
+; SI-NEXT: v_readlane_b32 s7, v62, 28
; SI-NEXT: s_add_i32 s14, s75, 0x300
; SI-NEXT: s_add_i32 s54, s74, 3
-; SI-NEXT: v_readlane_b32 s75, v61, 62
+; SI-NEXT: v_readlane_b32 s75, v61, 61
; SI-NEXT: s_and_b32 s6, s56, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s74, s54, 0xff
; SI-NEXT: s_lshl_b32 s75, s75, 8
; SI-NEXT: s_or_b32 s63, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 36
+; SI-NEXT: v_readlane_b32 s6, v62, 35
; SI-NEXT: s_or_b32 s74, s75, s74
-; SI-NEXT: v_readlane_b32 s75, v61, 61
+; SI-NEXT: v_readlane_b32 s75, v61, 60
; SI-NEXT: s_add_i32 s58, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 30
+; SI-NEXT: v_readlane_b32 s7, v62, 29
; SI-NEXT: s_add_i32 s15, s76, 0x300
; SI-NEXT: s_add_i32 s50, s75, 3
-; SI-NEXT: v_readlane_b32 s76, v61, 60
+; SI-NEXT: v_readlane_b32 s76, v61, 59
; SI-NEXT: s_and_b32 s6, s58, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s75, s50, 0xff
; SI-NEXT: s_lshl_b32 s76, s76, 8
; SI-NEXT: s_or_b32 s79, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 37
+; SI-NEXT: v_readlane_b32 s6, v62, 36
; SI-NEXT: s_or_b32 s75, s76, s75
-; SI-NEXT: v_readlane_b32 s76, v61, 59
+; SI-NEXT: v_readlane_b32 s76, v61, 58
; SI-NEXT: s_add_i32 s59, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 33
+; SI-NEXT: v_readlane_b32 s7, v62, 32
; SI-NEXT: s_add_i32 s18, s77, 0x300
; SI-NEXT: s_add_i32 s48, s76, 3
-; SI-NEXT: v_readlane_b32 s77, v61, 58
+; SI-NEXT: v_readlane_b32 s77, v61, 57
; SI-NEXT: s_and_b32 s6, s59, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s76, s48, 0xff
; SI-NEXT: s_lshl_b32 s77, s77, 8
; SI-NEXT: s_or_b32 s78, s7, s6
-; SI-NEXT: v_readlane_b32 s6, v62, 35
+; SI-NEXT: v_readlane_b32 s6, v62, 34
; SI-NEXT: s_or_b32 s76, s77, s76
-; SI-NEXT: v_readlane_b32 s77, v61, 57
+; SI-NEXT: v_readlane_b32 s77, v61, 56
; SI-NEXT: s_add_i32 s57, s6, 3
-; SI-NEXT: v_readlane_b32 s7, v62, 32
+; SI-NEXT: v_readlane_b32 s7, v62, 31
; SI-NEXT: s_add_i32 s11, s72, 0x300
; SI-NEXT: s_add_i32 s72, s79, 0x300
; SI-NEXT: s_add_i32 s37, s77, 3
-; SI-NEXT: v_readlane_b32 s79, v61, 56
+; SI-NEXT: v_readlane_b32 s79, v61, 55
; SI-NEXT: s_and_b32 s6, s57, 0xff
; SI-NEXT: s_lshl_b32 s7, s7, 8
; SI-NEXT: s_and_b32 s77, s37, 0xff
; SI-NEXT: s_lshl_b32 s79, s79, 8
; SI-NEXT: s_or_b32 s88, s7, s6
; SI-NEXT: s_or_b32 s77, s79, s77
-; SI-NEXT: v_readlane_b32 s79, v61, 55
+; SI-NEXT: v_readlane_b32 s79, v61, 54
; SI-NEXT: s_add_i32 s21, s89, 0x300
; SI-NEXT: s_add_i32 s89, s88, 0x300
; SI-NEXT: s_add_i32 s35, s79, 3
-; SI-NEXT: v_readlane_b32 s88, v61, 54
+; SI-NEXT: v_readlane_b32 s88, v61, 53
; SI-NEXT: s_and_b32 s79, s35, 0xff
; SI-NEXT: s_lshl_b32 s88, s88, 8
; SI-NEXT: s_or_b32 s79, s88, s79
-; SI-NEXT: v_readlane_b32 s88, v61, 53
+; SI-NEXT: v_readlane_b32 s88, v61, 52
; SI-NEXT: s_add_i32 s25, s92, 0x300
; SI-NEXT: s_add_i32 s30, s88, 3
-; SI-NEXT: v_readlane_b32 s92, v61, 52
+; SI-NEXT: v_readlane_b32 s92, v61, 51
; SI-NEXT: s_and_b32 s88, s30, 0xff
; SI-NEXT: s_lshl_b32 s92, s92, 8
; SI-NEXT: s_or_b32 s88, s92, s88
-; SI-NEXT: v_readlane_b32 s92, v61, 51
+; SI-NEXT: v_readlane_b32 s92, v61, 50
; SI-NEXT: s_add_i32 s94, s92, 3
; SI-NEXT: s_and_b32 s92, s94, 0xff
; SI-NEXT: s_lshl_b32 s91, s91, 8
@@ -176925,21 +177547,21 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_and_b32 s90, s90, 0xff
; SI-NEXT: s_lshl_b32 s92, s31, 8
; SI-NEXT: s_or_b32 s90, s92, s90
-; SI-NEXT: v_readlane_b32 s92, v61, 50
+; SI-NEXT: v_readlane_b32 s92, v61, 49
; SI-NEXT: s_add_i32 s92, s92, 3
; SI-NEXT: s_add_i32 s26, s93, 0x300
; SI-NEXT: s_and_b32 s92, s92, 0xff
; SI-NEXT: s_lshl_b32 s93, s95, 8
; SI-NEXT: s_or_b32 s92, s93, s92
-; SI-NEXT: v_readlane_b32 s93, v61, 49
+; SI-NEXT: v_readlane_b32 s93, v61, 48
; SI-NEXT: s_add_i32 s93, s93, 3
; SI-NEXT: s_and_b32 s93, s93, 0xff
-; SI-NEXT: s_lshl_b32 s94, s55, 8
+; SI-NEXT: s_lshl_b32 s94, s99, 8
; SI-NEXT: s_or_b32 s93, s94, s93
-; SI-NEXT: v_readlane_b32 s94, v61, 48
+; SI-NEXT: v_readlane_b32 s94, v61, 47
; SI-NEXT: s_add_i32 s94, s94, 3
; SI-NEXT: s_and_b32 s94, s94, 0xff
-; SI-NEXT: s_lshl_b32 s95, s99, 8
+; SI-NEXT: s_lshl_b32 s95, s55, 8
; SI-NEXT: s_or_b32 s94, s95, s94
; SI-NEXT: v_readlane_b32 s95, v61, 1
; SI-NEXT: s_add_i32 s95, s95, 3
@@ -176947,7 +177569,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_add_i32 s6, vcc_lo, 0x300
; SI-NEXT: s_and_b32 s95, s95, 0xff
; SI-NEXT: s_lshl_b32 vcc_lo, s30, 8
-; SI-NEXT: v_readlane_b32 s30, v61, 47
+; SI-NEXT: v_readlane_b32 s30, v61, 3
; SI-NEXT: s_or_b32 s95, vcc_lo, s95
; SI-NEXT: s_add_i32 vcc_lo, s30, 3
; SI-NEXT: v_readlane_b32 s30, v61, 2
@@ -177398,19 +178020,19 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: .LBB93_4:
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
-; SI-NEXT: s_mov_b32 s17, s19
+; SI-NEXT: s_mov_b32 s17, s18
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
-; SI-NEXT: s_mov_b32 s19, s50
+; SI-NEXT: s_mov_b32 s18, s67
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_mov_b32 s23, s48
-; SI-NEXT: s_mov_b32 s26, s37
+; SI-NEXT: s_mov_b32 s19, s35
+; SI-NEXT: s_mov_b32 s23, s39
+; SI-NEXT: s_mov_b32 s26, s75
; SI-NEXT: s_mov_b32 s28, s29
; SI-NEXT: s_mov_b32 s29, s76
-; SI-NEXT: s_mov_b32 s59, s58
-; SI-NEXT: s_mov_b32 s56, s47
-; SI-NEXT: s_mov_b32 s46, s41
-; SI-NEXT: s_mov_b32 s12, s11
+; SI-NEXT: s_mov_b32 s58, s57
+; SI-NEXT: s_mov_b32 s47, s46
+; SI-NEXT: s_mov_b32 s41, s14
; SI-NEXT: s_mov_b32 s11, s7
; SI-NEXT: s_mov_b32 s7, s97
; SI-NEXT: s_mov_b32 s97, s81
@@ -177421,46 +178043,45 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: s_mov_b32 s15, s89
; SI-NEXT: s_mov_b32 s24, s98
; SI-NEXT: s_mov_b32 s20, s88
-; SI-NEXT: s_mov_b32 s99, s55
-; SI-NEXT: ; kill: killed $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: v_readlane_b32 s75, v61, 7
-; SI-NEXT: v_readlane_b32 s76, v61, 3
-; SI-NEXT: v_readlane_b32 s77, v61, 5
-; SI-NEXT: v_readlane_b32 s78, v61, 4
-; SI-NEXT: v_readlane_b32 s92, v61, 6
-; SI-NEXT: v_readlane_b32 s39, v61, 9
-; SI-NEXT: v_readlane_b32 s37, v61, 8
-; SI-NEXT: v_readlane_b32 s30, v61, 10
-; SI-NEXT: v_readlane_b32 s48, v61, 11
-; SI-NEXT: v_readlane_b32 s52, v61, 13
-; SI-NEXT: v_readlane_b32 s35, v61, 12
-; SI-NEXT: v_readlane_b32 s50, v61, 15
-; SI-NEXT: v_readlane_b32 s64, v61, 14
-; SI-NEXT: v_readlane_b32 s54, v61, 17
-; SI-NEXT: v_readlane_b32 s67, v61, 16
-; SI-NEXT: v_readlane_b32 s65, v61, 18
-; SI-NEXT: v_readlane_b32 s70, v61, 19
-; SI-NEXT: v_readlane_b32 s49, v61, 21
-; SI-NEXT: v_readlane_b32 s71, v61, 20
-; SI-NEXT: v_readlane_b32 s80, v61, 23
-; SI-NEXT: v_readlane_b32 s83, v61, 22
-; SI-NEXT: v_readlane_b32 s84, v61, 25
-; SI-NEXT: v_readlane_b32 s82, v61, 24
-; SI-NEXT: v_readlane_b32 s87, v61, 26
-; SI-NEXT: v_readlane_b32 s86, v61, 27
-; SI-NEXT: v_readlane_b32 s96, v61, 29
-; SI-NEXT: v_readlane_b32 s51, v61, 28
-; SI-NEXT: s_mov_b32 s55, s93
+; SI-NEXT: s_mov_b32 s55, s99
+; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: v_readlane_b32 s75, v61, 8
+; SI-NEXT: v_readlane_b32 s76, v61, 4
+; SI-NEXT: v_readlane_b32 s77, v61, 6
+; SI-NEXT: v_readlane_b32 s78, v61, 5
+; SI-NEXT: v_readlane_b32 s92, v61, 7
+; SI-NEXT: v_readlane_b32 s39, v61, 10
+; SI-NEXT: v_readlane_b32 s37, v61, 9
+; SI-NEXT: v_readlane_b32 s30, v61, 11
+; SI-NEXT: v_readlane_b32 s48, v61, 12
+; SI-NEXT: v_readlane_b32 s52, v61, 14
+; SI-NEXT: v_readlane_b32 s35, v61, 13
+; SI-NEXT: v_readlane_b32 s50, v61, 16
+; SI-NEXT: v_readlane_b32 s64, v61, 15
+; SI-NEXT: v_readlane_b32 s54, v61, 18
+; SI-NEXT: v_readlane_b32 s67, v61, 17
+; SI-NEXT: v_readlane_b32 s65, v61, 19
+; SI-NEXT: v_readlane_b32 s70, v61, 20
+; SI-NEXT: v_readlane_b32 s71, v61, 22
+; SI-NEXT: v_readlane_b32 s38, v61, 21
+; SI-NEXT: v_readlane_b32 s80, v61, 24
+; SI-NEXT: v_readlane_b32 s83, v61, 23
+; SI-NEXT: v_readlane_b32 s84, v61, 26
+; SI-NEXT: v_readlane_b32 s82, v61, 25
+; SI-NEXT: v_readlane_b32 s87, v61, 27
+; SI-NEXT: v_readlane_b32 s86, v61, 28
+; SI-NEXT: v_readlane_b32 s96, v61, 30
+; SI-NEXT: v_readlane_b32 s51, v61, 29
+; SI-NEXT: s_mov_b32 s99, s93
; SI-NEXT: s_mov_b32 s95, s91
-; SI-NEXT: v_readlane_b32 s94, v61, 31
+; SI-NEXT: v_readlane_b32 s94, v61, 32
; SI-NEXT: s_mov_b32 s31, s90
-; SI-NEXT: v_readlane_b32 s34, v61, 30
-; SI-NEXT: v_readlane_b32 s53, v61, 32
-; SI-NEXT: v_readlane_b32 s66, v61, 33
-; SI-NEXT: v_readlane_b32 s68, v61, 34
-; SI-NEXT: v_readlane_b32 s69, v61, 35
-; SI-NEXT: v_readlane_b32 s8, v61, 36
+; SI-NEXT: v_readlane_b32 s34, v61, 31
+; SI-NEXT: v_readlane_b32 s53, v61, 33
+; SI-NEXT: v_readlane_b32 s66, v61, 34
+; SI-NEXT: v_readlane_b32 s68, v61, 35
+; SI-NEXT: v_readlane_b32 s69, v61, 36
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr1
@@ -177522,7 +178143,9 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: s_branch .LBB93_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB93_2
+; SI-NEXT: s_branch .LBB93_3
;
; VI-LABEL: bitcast_v128i8_to_v64f16_scalar:
; VI: ; %bb.0:
@@ -177543,22 +178166,22 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8
@@ -177583,14 +178206,17 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
+; VI-NEXT: v_mov_b32_e32 v48, v27
+; VI-NEXT: v_mov_b32_e32 v39, v29
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v13
@@ -177598,50 +178224,51 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19
; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23
-; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v28
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200
@@ -177650,9 +178277,9 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v22
-; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v22
+; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v24
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18
@@ -177661,23 +178288,25 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v0
; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
@@ -177690,128 +178319,123 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0
; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328
-; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:36
-; VI-NEXT: s_waitcnt vmcnt(11)
-; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v6
-; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v3
-; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5
+; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:324
; VI-NEXT: s_waitcnt vmcnt(10)
-; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; VI-NEXT: s_waitcnt vmcnt(8)
+; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v0
-; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:52
-; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:116
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:172
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v7
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:308
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:324
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:292
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:188
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:180
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:116
+; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4
+; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(13)
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB93_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -177822,222 +178446,228 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v33, v6
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v3, v8
+; VI-NEXT: v_mov_b32_e32 v2, v8
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v3, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v2, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v36, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v37, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v38, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v39, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v39, v22
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v38, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v39, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v48, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v49, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v49, v26
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v48, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v45, v62
+; VI-NEXT: v_or_b32_sdwa v0, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v2, v52, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v32, v1
-; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v54, v22
-; VI-NEXT: v_mov_b32_e32 v41, v24
+; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v34, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v37, v1
-; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v55, v26
+; VI-NEXT: v_mov_b32_e32 v51, v1
+; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v53, v0
+; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v49, v1
-; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v43, v27
+; VI-NEXT: v_mov_b32_e32 v54, v1
+; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v47, v32
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v55, v0
+; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v35, v1
-; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v53, v28
+; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v47, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v33, v0
-; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v57, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v25, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_mov_b32_e32 v38, v0
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v63, v33
+; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_mov_b32_e32 v48, v0
+; VI-NEXT: v_or_b32_sdwa v0, v27, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v50, v1
+; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v56, v0
-; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v58, v1
-; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v61, v60
-; VI-NEXT: v_mov_b32_e32 v60, v59
+; VI-NEXT: v_mov_b32_e32 v60, v1
+; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v38, v0
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v61, v0
+; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v48, v1
-; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v1
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v62, v43
; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v44, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v44, v42
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v37, v0
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v58, v57
+; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v50, v0
-; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v62, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v52, v0
-; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v0, v29, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v46, v1
-; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v32, v30
+; VI-NEXT: v_mov_b32_e32 v57, v40
+; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_mov_b32_e32 v45, v0
+; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v56, v1
+; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v42, v41
+; VI-NEXT: v_mov_b32_e32 v41, v29
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v63, v0
-; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v29, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v47, v1
-; VI-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v59, v0
+; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v43, v31
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v57, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v33, v0
+; VI-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -178069,12 +178699,14 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_cbranch_execnz .LBB93_3
; VI-NEXT: .LBB93_2: ; %cmp.true
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v59
-; VI-NEXT: v_or_b32_sdwa v29, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v40
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
+; VI-NEXT: v_or_b32_sdwa v28, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v27, v45, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v47
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
@@ -178093,302 +178725,309 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: s_lshl_b32 s9, s19, 8
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_lshl_b32 s10, s17, 8
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_or_b32_sdwa v31, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: v_or_b32_sdwa v30, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v41
+; VI-NEXT: v_or_b32_sdwa v29, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v63, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v58
+; VI-NEXT: v_or_b32_sdwa v24, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43
+; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v62
+; VI-NEXT: v_or_b32_sdwa v2, v59, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v26, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v62
-; VI-NEXT: v_or_b32_sdwa v28, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44
-; VI-NEXT: v_or_b32_sdwa v53, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v45
-; VI-NEXT: v_or_b32_sdwa v27, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
-; VI-NEXT: v_or_b32_sdwa v52, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40
-; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60
-; VI-NEXT: v_or_b32_sdwa v59, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v61
-; VI-NEXT: v_or_b32_sdwa v24, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v26, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v48, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48
-; VI-NEXT: v_or_b32_sdwa v24, v24, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24
+; VI-NEXT: v_or_b32_sdwa v32, v37, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v32
+; VI-NEXT: v_or_b32_sdwa v26, v26, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v23, v41, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v25, v34, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v38, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38
-; VI-NEXT: v_or_b32_sdwa v23, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23
+; VI-NEXT: v_or_b32_sdwa v33, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v33
+; VI-NEXT: v_or_b32_sdwa v25, v25, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v22, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v60, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v50, v33, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v50
-; VI-NEXT: v_or_b32_sdwa v22, v22, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22
+; VI-NEXT: v_or_b32_sdwa v34, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v23, v50, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v54, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v37, v48, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37
+; VI-NEXT: v_or_b32_sdwa v23, v23, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v20, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v22, v39, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v49, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v49, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49
-; VI-NEXT: v_or_b32_sdwa v20, v20, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v22, v22, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v19, v37, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v52, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v37, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v20, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v31, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v19, v19, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v18, v32, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v54, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v57, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v19, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v57
-; VI-NEXT: v_or_b32_sdwa v18, v18, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51
+; VI-NEXT: v_or_b32_sdwa v19, v19, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v18, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v48, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48
+; VI-NEXT: v_or_b32_sdwa v18, v18, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v56, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v34, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34
-; VI-NEXT: v_or_b32_sdwa v14, v14, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v35, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v35
+; VI-NEXT: v_or_b32_sdwa v15, v15, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36
-; VI-NEXT: v_or_b32_sdwa v13, v13, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v26
-; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v52
-; VI-NEXT: v_or_b32_sdwa v26, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54
-; VI-NEXT: v_or_b32_sdwa v21, v21, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21
-; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v38, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38
+; VI-NEXT: v_or_b32_sdwa v14, v14, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v2
+; VI-NEXT: v_or_b32_sdwa v29, v29, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14
+; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51
-; VI-NEXT: v_or_b32_sdwa v12, v12, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v59
-; VI-NEXT: v_or_b32_sdwa v25, v25, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v39
+; VI-NEXT: v_or_b32_sdwa v13, v13, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v27
+; VI-NEXT: v_add_u32_e32 v27, vcc, 0x300, v24
+; VI-NEXT: v_or_b32_sdwa v24, v60, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v27, v63, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v28, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
+; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24
+; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27
+; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v53, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v53, vcc, 0x300, v53
+; VI-NEXT: v_or_b32_sdwa v12, v12, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12
-; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v33, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v40, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v50, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
-; VI-NEXT: v_or_b32_sdwa v30, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
-; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v2
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v9, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v41
-; VI-NEXT: v_or_b32_sdwa v9, v9, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v10
+; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42
+; VI-NEXT: v_or_b32_sdwa v9, v9, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v10
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x300, v55
-; VI-NEXT: v_or_b32_sdwa v10, v39, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v53
-; VI-NEXT: v_or_b32_sdwa v27, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v28, v29, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v29, v30, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v35, v16, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v10, v50, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v52
+; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54
+; VI-NEXT: v_or_b32_sdwa v20, v20, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v21, v21, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10
-; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27
-; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28
-; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v8, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42
-; VI-NEXT: v_or_b32_sdwa v8, v8, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v11
-; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v40
-; VI-NEXT: v_or_b32_sdwa v11, v33, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v1
-; VI-NEXT: v_or_b32_sdwa v30, v31, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v17, v17, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v43, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v43
+; VI-NEXT: v_or_b32_sdwa v8, v8, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v11
+; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v41
+; VI-NEXT: v_or_b32_sdwa v17, v17, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v11, v36, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v35
+; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v0
+; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v1
+; VI-NEXT: v_or_b32_sdwa v30, v30, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v31, v31, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v30
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31
; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v7, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v44, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v44, vcc, 0x300, v44
; VI-NEXT: v_or_b32_sdwa v7, v7, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
@@ -178396,14 +179035,14 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v6, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45
; VI-NEXT: v_or_b32_sdwa v6, v6, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6
@@ -178411,14 +179050,14 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v5, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46
; VI-NEXT: v_or_b32_sdwa v5, v5, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
@@ -178426,17 +179065,17 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 3, v4
; VI-NEXT: v_or_b32_sdwa v4, v47, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v32
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x300, v4
; VI-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v4
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v47, v32, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v47
+; VI-NEXT: v_or_b32_sdwa v47, v56, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_e32 v47, s4, v47
; VI-NEXT: s_and_b32 s4, s26, 0xff
; VI-NEXT: s_or_b32 s4, s5, s4
@@ -178450,34 +179089,25 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: s_or_b32 s8, s9, s8
; VI-NEXT: s_and_b32 s9, s16, 0xff
; VI-NEXT: s_or_b32 s9, s10, s9
-; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v56
; VI-NEXT: s_addk_i32 s5, 0x300
; VI-NEXT: s_addk_i32 s7, 0x300
; VI-NEXT: s_addk_i32 s9, 0x300
-; VI-NEXT: v_or_b32_sdwa v15, v15, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v32, v16, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_lshl_b32 s4, s4, 16
; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_and_b32 s9, s9, 0xffff
; VI-NEXT: s_and_b32 s7, s7, 0xffff
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v32
-; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v0
; VI-NEXT: s_or_b32 s8, s8, s9
; VI-NEXT: s_or_b32 s6, s6, s7
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s8, s8, 0x3000000
; VI-NEXT: s_add_i32 s6, s6, 0x3000000
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
-; VI-NEXT: v_or_b32_sdwa v31, v31, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v47
-; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: v_mov_b32_e32 v2, s4
-; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31
; VI-NEXT: .LBB93_3: ; %end
; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -178498,38 +179128,42 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB93_4:
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v61, v60
-; VI-NEXT: v_mov_b32_e32 v60, v59
-; VI-NEXT: v_mov_b32_e32 v45, v62
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v57, v5
-; VI-NEXT: v_mov_b32_e32 v47, v4
-; VI-NEXT: v_mov_b32_e32 v63, v3
-; VI-NEXT: v_mov_b32_e32 v53, v28
-; VI-NEXT: v_mov_b32_e32 v43, v27
-; VI-NEXT: v_mov_b32_e32 v55, v26
-; VI-NEXT: v_mov_b32_e32 v41, v24
-; VI-NEXT: v_mov_b32_e32 v54, v22
+; VI-NEXT: v_mov_b32_e32 v63, v6
+; VI-NEXT: v_mov_b32_e32 v59, v5
+; VI-NEXT: v_mov_b32_e32 v58, v57
+; VI-NEXT: v_mov_b32_e32 v47, v32
+; VI-NEXT: v_mov_b32_e32 v57, v40
+; VI-NEXT: v_mov_b32_e32 v32, v30
+; VI-NEXT: v_mov_b32_e32 v44, v42
+; VI-NEXT: v_mov_b32_e32 v62, v43
+; VI-NEXT: v_mov_b32_e32 v43, v31
+; VI-NEXT: v_mov_b32_e32 v42, v41
+; VI-NEXT: v_mov_b32_e32 v41, v29
+; VI-NEXT: v_mov_b32_e32 v56, v4
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v49, v26
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v39, v22
+; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB93_2
+; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB93_2
+; VI-NEXT: s_branch .LBB93_3
;
; GFX9-LABEL: bitcast_v128i8_to_v64f16_scalar:
; GFX9: ; %bb.0:
@@ -178550,16 +179184,18 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332
; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8
@@ -178584,93 +179220,97 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
-; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v1
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
+; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v3
-; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v7
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v9
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v11
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v13
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v15
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23
-; GFX9-NEXT: s_waitcnt vmcnt(24)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v13
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v17
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v45
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v44
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v30
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
@@ -178680,31 +179320,32 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_lshlrev_b32_e32 v43, 8, v21
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(7)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
@@ -178718,148 +179359,145 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:328
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:324
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:148
+; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:140
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:132
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:116
+; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:44
; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
-; GFX9-NEXT: s_waitcnt vmcnt(15)
+; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_waitcnt vmcnt(51)
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v1
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:132
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:140
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:148
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:164
-; GFX9-NEXT: s_waitcnt vmcnt(21)
-; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
-; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(30)
-; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(33)
-; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(36)
-; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(39)
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(42)
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v1
; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(54)
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(55)
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
@@ -178867,17 +179505,13 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT: v_and_b32_e32 v3, s4, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX9-NEXT: v_or_b32_sdwa v2, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v4, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v6, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v4, v4, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v8, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v2, v0, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -178902,266 +179536,279 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v39, v16
-; GFX9-NEXT: v_or_b32_sdwa v17, v34, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_or_b32_sdwa v2, v33, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v33, v32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v55, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v54, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v42, v61
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v55, v1
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v36, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v50, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v57, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v49, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v17, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v63, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v21, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v17, v17, 16, v1
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_mov_b32_e32 v33, v45
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v18, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v19, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v35, v61
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v20, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v21, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v34, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v23, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_mov_b32_e32 v46, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v42, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v24, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v35, v45
-; GFX9-NEXT: v_mov_b32_e32 v45, v61
-; GFX9-NEXT: v_mov_b32_e32 v61, v42
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v38, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v30, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v25, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v54, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v54, v2
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v41, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v46, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v29, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v27, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v40, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v27, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v60, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v1, v57, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v56, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v59, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v45, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX9-NEXT: v_mov_b32_e32 v45, v34
+; GFX9-NEXT: v_mov_b32_e32 v34, v38
+; GFX9-NEXT: v_mov_b32_e32 v56, v39
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v41, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v29, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v41, v43
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v60, v49
+; GFX9-NEXT: v_mov_b32_e32 v47, v61
+; GFX9-NEXT: v_mov_b32_e32 v49, v48
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB93_3
; GFX9-NEXT: .LBB93_2:
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v33, v45
-; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v33, v32
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v35, v61
; GFX9-NEXT: .LBB93_3: ; %Flow
-; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GFX9-NEXT: s_cbranch_vccnz .LBB93_5
; GFX9-NEXT: ; %bb.4: ; %cmp.true
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
@@ -179179,59 +179826,66 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: s_lshl_b32 s9, s17, 8
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_lshl_b32 s10, s19, 8
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(15)
+; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(12)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
+; GFX9-NEXT: s_waitcnt vmcnt(8)
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
-; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
+; GFX9-NEXT: v_or_b32_sdwa v20, v49, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
+; GFX9-NEXT: v_or_b32_sdwa v24, v41, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v25, v37, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v37, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v23, v42, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
+; GFX9-NEXT: v_or_b32_sdwa v21, v47, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v22, v46, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v25, v32, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21
+; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v38, v38, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
; GFX9-NEXT: v_and_b32_e32 v3, s4, v3
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_and_b32 s4, s24, 0xff
@@ -179245,8 +179899,6 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: s_and_b32 s8, s16, 0xff
; GFX9-NEXT: s_or_b32 s8, s9, s8
; GFX9-NEXT: s_and_b32 s9, s18, 0xff
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_or_b32 s9, s10, s9
; GFX9-NEXT: s_addk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s5, 0x300
@@ -179263,14 +179915,14 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -179278,14 +179930,14 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -179293,264 +179945,240 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v37, v44, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v38, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v39, v50, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v48, v60, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v49, v45, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v51, v62, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v39, v36, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v48, v46, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v49, v35, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54
+; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
-; GFX9-NEXT: v_or_b32_sdwa v2, v16, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v51, v34, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
-; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16
-; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v61
-; GFX9-NEXT: v_or_b32_sdwa v24, v54, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24
; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48
; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51
+; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41
; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v45
-; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
-; GFX9-NEXT: v_or_b32_sdwa v20, v57, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v56
-; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
-; GFX9-NEXT: v_or_b32_sdwa v21, v32, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21
-; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v47
-; GFX9-NEXT: v_or_b32_sdwa v23, v41, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v43
-; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
-; GFX9-NEXT: v_or_b32_sdwa v22, v44, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v35
; GFX9-NEXT: v_add_u32_e32 v35, 0x300, v22
; GFX9-NEXT: v_add_u32_e32 v22, 0x300, v52
-; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v28, v35, 16, v28
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v43, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v52, 0x300, v43
-; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v44, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v33
-; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1
-; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
+; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v45, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v27, 0x300, v23
; GFX9-NEXT: v_add_u32_e32 v26, 0x300, v25
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
-; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2
; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v38
; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v50
; GFX9-NEXT: v_add_u32_e32 v38, 0x300, v39
; GFX9-NEXT: v_add_u32_e32 v39, 0x300, v49
; GFX9-NEXT: v_add_u32_e32 v49, 0x300, v53
; GFX9-NEXT: v_add_u32_e32 v50, 0x300, v55
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
+; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20
+; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40
; GFX9-NEXT: v_add_u32_e32 v53, 0x300, v45
+; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX9-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX9-NEXT: v_and_b32_e32 v26, 0xffff, v26
; GFX9-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23
@@ -179558,10 +180186,32 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v25, v38, 16, v25
; GFX9-NEXT: v_lshl_or_b32 v26, v37, 16, v26
; GFX9-NEXT: v_lshl_or_b32 v27, v36, 16, v27
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0
+; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
+; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2
+; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v18, 3, v18
; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18
; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
@@ -179569,7 +180219,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
-; GFX9-NEXT: v_or_b32_sdwa v19, v60, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v19, v59, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19
; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
@@ -179737,7 +180387,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v83, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v86, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v97, 8, v2
@@ -179806,38 +180456,38 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB93_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
@@ -179924,12 +180574,12 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s6
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s7
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
@@ -180032,9 +180682,8 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB93_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB93_3
; GFX11-TRUE16-NEXT: .LBB93_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
@@ -180420,7 +181069,9 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-TRUE16-NEXT: .LBB93_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB93_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB93_2
+; GFX11-TRUE16-NEXT: s_branch .LBB93_3
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64f16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -180563,7 +181214,7 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v83, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v86, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v85, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v97, 8, v2
@@ -180632,38 +181283,38 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB93_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v5, 0xffff, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v35
@@ -180750,12 +181401,12 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s6
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v151
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v149
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s7
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v180
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v177
@@ -180858,9 +181509,8 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB93_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB93_3
; GFX11-FAKE16-NEXT: .LBB93_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
@@ -181246,7 +181896,9 @@ define inreg <64 x half> @bitcast_v128i8_to_v64f16_scalar(<128 x i8> inreg %a, i
; GFX11-FAKE16-NEXT: .LBB93_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB93_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB93_2
+; GFX11-FAKE16-NEXT: s_branch .LBB93_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -186685,6 +187337,7 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v45, v46
; SI-NEXT: v_cvt_f16_f32_e32 v46, v57
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
@@ -187207,7 +187860,6 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v7, v8
; SI-NEXT: v_mov_b32_e32 v8, v5
; SI-NEXT: v_mov_b32_e32 v44, v37
-; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr13
@@ -188470,8 +189122,9 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s6, v17
; VI-NEXT: v_readfirstlane_b32 s7, v18
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[46:47], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[46:47], vcc, exec
+; VI-NEXT: s_mov_b64 vcc, -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -188951,8 +189604,6 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: .LBB95_3:
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr71
; VI-NEXT: ; implicit-def: $sgpr70
; VI-NEXT: ; implicit-def: $sgpr51
@@ -189103,7 +189754,10 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB95_2
+; VI-NEXT: ; kill: killed $sgpr46
+; VI-NEXT: ; implicit-def: $sgpr46
+; VI-NEXT: s_andn2_b64 vcc, exec, vcc
+; VI-NEXT: s_cbranch_vccz .LBB95_2
; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v1, s44
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:96 ; 4-byte Folded Spill
@@ -189835,8 +190489,9 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_readfirstlane_b32 s6, v17
; GFX9-NEXT: v_readfirstlane_b32 s7, v18
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
+; GFX9-NEXT: s_mov_b64 vcc, -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -190235,8 +190890,6 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: .LBB95_3:
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr81
; GFX9-NEXT: ; implicit-def: $sgpr71
; GFX9-NEXT: ; implicit-def: $sgpr80
@@ -190379,7 +191032,10 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB95_2
+; GFX9-NEXT: ; kill: killed $sgpr46
+; GFX9-NEXT: ; implicit-def: $sgpr46
+; GFX9-NEXT: s_andn2_b64 vcc, exec, vcc
+; GFX9-NEXT: s_cbranch_vccz .LBB95_2
; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v15, s71
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
@@ -191111,16 +191767,16 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_writelane_b32 v76, s99, 3
; GFX11-NEXT: v_readfirstlane_b32 s8, v9
; GFX11-NEXT: v_readfirstlane_b32 s9, v10
-; GFX11-NEXT: v_readfirstlane_b32 s6, v11
+; GFX11-NEXT: v_readfirstlane_b32 s4, v11
; GFX11-NEXT: v_writelane_b32 v75, s36, 4
; GFX11-NEXT: v_writelane_b32 v76, s100, 4
-; GFX11-NEXT: v_readfirstlane_b32 s7, v12
-; GFX11-NEXT: v_readfirstlane_b32 s4, v13
-; GFX11-NEXT: v_readfirstlane_b32 s5, v14
+; GFX11-NEXT: v_readfirstlane_b32 s5, v12
+; GFX11-NEXT: v_readfirstlane_b32 s6, v13
+; GFX11-NEXT: v_readfirstlane_b32 s7, v14
; GFX11-NEXT: v_writelane_b32 v75, s37, 5
; GFX11-NEXT: v_writelane_b32 v76, s101, 5
-; GFX11-NEXT: s_mov_b32 s99, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 vcc_lo, -1
; GFX11-NEXT: s_clause 0x12
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:72
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:68
@@ -191143,8 +191799,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: scratch_store_b32 off, v74, s32
; GFX11-NEXT: v_writelane_b32 v75, s38, 6
; GFX11-NEXT: v_writelane_b32 v76, s102, 6
-; GFX11-NEXT: ; implicit-def: $vgpr78 : SGPR spill to VGPR lane
; GFX11-NEXT: ; implicit-def: $vgpr77 : SGPR spill to VGPR lane
+; GFX11-NEXT: ; implicit-def: $vgpr78 : SGPR spill to VGPR lane
; GFX11-NEXT: v_writelane_b32 v75, s39, 7
; GFX11-NEXT: v_writelane_b32 v76, s103, 7
; GFX11-NEXT: v_writelane_b32 v75, s48, 8
@@ -191174,160 +191830,158 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_writelane_b32 v75, s87, 31
; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b32 s42, s27, 24
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s7, 24
+; GFX11-NEXT: v_writelane_b32 v78, s42, 7
; GFX11-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[0:1], 24
-; GFX11-NEXT: v_writelane_b32 v77, s42, 8
+; GFX11-NEXT: s_lshr_b32 s34, s7, 16
+; GFX11-NEXT: s_lshr_b32 s36, s7, 8
+; GFX11-NEXT: s_lshr_b32 s35, s6, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 6
; GFX11-NEXT: s_lshr_b32 s42, s27, 8
-; GFX11-NEXT: s_lshr_b32 s43, s27, 24
-; GFX11-NEXT: s_lshr_b32 s34, s5, 24
-; GFX11-NEXT: s_lshr_b32 s35, s5, 16
-; GFX11-NEXT: v_writelane_b32 v77, s42, 7
+; GFX11-NEXT: s_lshr_b32 s37, s6, 8
+; GFX11-NEXT: s_lshr_b32 s38, s5, 24
+; GFX11-NEXT: s_lshr_b32 s39, s5, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 5
; GFX11-NEXT: s_lshr_b32 s42, s26, 16
-; GFX11-NEXT: s_lshr_b32 s37, s5, 8
-; GFX11-NEXT: s_lshr_b32 s36, s4, 16
-; GFX11-NEXT: s_lshr_b32 s38, s4, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 6
+; GFX11-NEXT: s_lshr_b32 s49, s5, 8
+; GFX11-NEXT: s_lshr_b32 s48, s4, 16
+; GFX11-NEXT: s_lshr_b32 s50, s4, 8
+; GFX11-NEXT: v_writelane_b32 v78, s42, 4
; GFX11-NEXT: s_lshr_b32 s42, s26, 8
-; GFX11-NEXT: s_lshr_b32 s39, s7, 24
-; GFX11-NEXT: s_lshr_b32 s48, s7, 16
-; GFX11-NEXT: s_lshr_b32 s50, s7, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 5
+; GFX11-NEXT: s_lshr_b32 s51, s9, 24
+; GFX11-NEXT: s_lshr_b32 s52, s9, 16
+; GFX11-NEXT: s_lshr_b32 s54, s9, 8
+; GFX11-NEXT: v_writelane_b32 v78, s42, 3
; GFX11-NEXT: s_lshr_b32 s42, s25, 24
-; GFX11-NEXT: s_lshr_b32 s49, s6, 16
-; GFX11-NEXT: s_lshr_b32 s51, s6, 8
-; GFX11-NEXT: s_lshr_b32 s52, s9, 24
-; GFX11-NEXT: v_writelane_b32 v77, s42, 4
+; GFX11-NEXT: s_lshr_b32 s53, s8, 16
+; GFX11-NEXT: s_lshr_b32 s55, s8, 8
+; GFX11-NEXT: s_lshr_b32 s64, s11, 24
+; GFX11-NEXT: v_writelane_b32 v78, s42, 2
; GFX11-NEXT: s_lshr_b32 s42, s25, 16
-; GFX11-NEXT: s_lshr_b32 s53, s9, 16
-; GFX11-NEXT: s_lshr_b32 s55, s9, 8
-; GFX11-NEXT: s_lshr_b32 s54, s8, 16
-; GFX11-NEXT: v_writelane_b32 v77, s42, 3
+; GFX11-NEXT: s_lshr_b32 s65, s11, 16
+; GFX11-NEXT: s_lshr_b32 s67, s11, 8
+; GFX11-NEXT: s_lshr_b32 s66, s10, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 1
; GFX11-NEXT: s_lshr_b32 s42, s25, 8
-; GFX11-NEXT: s_lshr_b32 s64, s8, 8
-; GFX11-NEXT: s_lshr_b32 s65, s11, 24
-; GFX11-NEXT: s_lshr_b32 s66, s11, 16
-; GFX11-NEXT: v_writelane_b32 v77, s42, 2
+; GFX11-NEXT: s_lshr_b32 s68, s10, 8
+; GFX11-NEXT: s_lshr_b32 s69, s13, 24
+; GFX11-NEXT: s_lshr_b32 s70, s13, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 0
; GFX11-NEXT: s_lshr_b32 s42, s24, 16
-; GFX11-NEXT: s_lshr_b32 s68, s11, 8
-; GFX11-NEXT: s_lshr_b32 s67, s10, 16
-; GFX11-NEXT: s_lshr_b32 s69, s10, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 1
+; GFX11-NEXT: s_lshr_b32 s80, s13, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 31
; GFX11-NEXT: s_lshr_b32 s42, s24, 8
-; GFX11-NEXT: s_lshr_b32 s70, s13, 24
-; GFX11-NEXT: s_lshr_b32 s71, s13, 16
-; GFX11-NEXT: s_lshr_b32 s81, s13, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 0
+; GFX11-NEXT: s_lshr_b32 s71, s12, 16
+; GFX11-NEXT: s_lshr_b32 s81, s12, 8
+; GFX11-NEXT: s_lshr_b32 s82, s15, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 30
; GFX11-NEXT: s_lshr_b32 s42, s23, 24
-; GFX11-NEXT: s_lshr_b32 s80, s12, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 31
+; GFX11-NEXT: s_lshr_b32 s83, s15, 16
+; GFX11-NEXT: s_lshr_b32 s85, s15, 8
+; GFX11-NEXT: s_lshr_b32 s84, s14, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 29
; GFX11-NEXT: s_lshr_b32 s42, s23, 16
-; GFX11-NEXT: s_lshr_b32 s82, s12, 8
-; GFX11-NEXT: s_lshr_b32 s83, s15, 24
-; GFX11-NEXT: s_lshr_b32 s84, s15, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 30
+; GFX11-NEXT: s_lshr_b32 s86, s14, 8
+; GFX11-NEXT: s_lshr_b32 s87, s41, 24
+; GFX11-NEXT: s_lshr_b32 s96, s41, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 28
; GFX11-NEXT: s_lshr_b32 s42, s23, 8
-; GFX11-NEXT: s_lshr_b32 s86, s15, 8
-; GFX11-NEXT: s_lshr_b32 s85, s14, 16
-; GFX11-NEXT: s_lshr_b32 s87, s14, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 29
+; GFX11-NEXT: s_lshr_b32 s98, s41, 8
+; GFX11-NEXT: s_lshr_b32 s97, s40, 16
+; GFX11-NEXT: s_lshr_b32 s99, s40, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 27
; GFX11-NEXT: s_lshr_b32 s42, s22, 16
-; GFX11-NEXT: s_lshr_b32 s96, s41, 24
-; GFX11-NEXT: s_lshr_b32 s97, s41, 16
-; GFX11-NEXT: s_lshr_b32 s100, s41, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 28
+; GFX11-NEXT: s_lshr_b32 s100, s29, 24
+; GFX11-NEXT: s_lshr_b32 s101, s29, 16
+; GFX11-NEXT: s_lshr_b32 s103, s29, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 26
; GFX11-NEXT: s_lshr_b32 s42, s22, 8
-; GFX11-NEXT: s_lshr_b32 s98, s40, 16
-; GFX11-NEXT: s_lshr_b32 s101, s40, 8
-; GFX11-NEXT: s_lshr_b32 s102, s29, 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 27
+; GFX11-NEXT: s_lshr_b32 s102, s28, 16
+; GFX11-NEXT: s_lshr_b32 s104, s28, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 25
; GFX11-NEXT: s_lshr_b32 s42, s21, 24
-; GFX11-NEXT: s_lshr_b32 s103, s29, 16
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s29, 8
-; GFX11-NEXT: s_lshr_b32 s104, s28, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 26
-; GFX11-NEXT: s_lshr_b32 s42, s21, 16
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[26:27], 24
-; GFX11-NEXT: s_lshr_b64 s[72:73], s[24:25], 24
+; GFX11-NEXT: s_lshr_b64 s[72:73], s[26:27], 24
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[60:61], s[22:23], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 25
-; GFX11-NEXT: s_lshr_b32 s42, s21, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 24
+; GFX11-NEXT: s_lshr_b32 s42, s21, 16
; GFX11-NEXT: s_lshr_b64 s[58:59], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[56:57], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[46:47], s[16:17], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 24
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 23
+; GFX11-NEXT: s_lshr_b32 s42, s21, 8
; GFX11-NEXT: s_lshr_b64 s[44:45], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[6:7], 24
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[8:9], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 23
+; GFX11-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[94:95], s[4:5], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 22
+; GFX11-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-NEXT: s_lshr_b64 s[92:93], s[8:9], 24
+; GFX11-NEXT: s_lshr_b64 s[90:91], s[10:11], 24
+; GFX11-NEXT: s_lshr_b64 s[88:89], s[12:13], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 21
; GFX11-NEXT: s_lshr_b32 s42, s20, 8
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[10:11], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[12:13], 24
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[14:15], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 22
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[14:15], 24
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[40:41], 24
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[28:29], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 20
; GFX11-NEXT: s_lshr_b32 s42, s19, 24
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[40:41], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 21
-; GFX11-NEXT: s_lshr_b32 s42, s19, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 20
+; GFX11-NEXT: v_writelane_b32 v77, s42, 19
+; GFX11-NEXT: s_lshr_b32 s42, s19, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 18
; GFX11-NEXT: s_lshr_b32 s42, s19, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 19
-; GFX11-NEXT: s_lshr_b32 s42, s18, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 18
+; GFX11-NEXT: v_writelane_b32 v77, s42, 17
+; GFX11-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 16
; GFX11-NEXT: s_lshr_b32 s42, s18, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 17
-; GFX11-NEXT: s_lshr_b32 s42, s17, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s17, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 14
; GFX11-NEXT: s_lshr_b32 s42, s17, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 15
-; GFX11-NEXT: s_lshr_b32 s42, s17, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 14
+; GFX11-NEXT: v_writelane_b32 v77, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s17, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 12
; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 13
-; GFX11-NEXT: s_lshr_b32 s42, s16, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 12
+; GFX11-NEXT: v_writelane_b32 v77, s42, 11
+; GFX11-NEXT: s_lshr_b32 s42, s16, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 10
; GFX11-NEXT: s_lshr_b32 s42, s3, 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 11
-; GFX11-NEXT: s_lshr_b32 s42, s3, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 10
+; GFX11-NEXT: v_writelane_b32 v77, s42, 9
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 8
; GFX11-NEXT: s_lshr_b32 s42, s3, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 9
-; GFX11-NEXT: s_lshr_b32 s42, s2, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 7
+; GFX11-NEXT: s_lshr_b32 s42, s2, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 6
; GFX11-NEXT: s_lshr_b32 s42, s2, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 7
-; GFX11-NEXT: s_lshr_b32 s42, s1, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 6
+; GFX11-NEXT: v_writelane_b32 v77, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s1, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 4
; GFX11-NEXT: s_lshr_b32 s42, s1, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 5
-; GFX11-NEXT: s_lshr_b32 s42, s1, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 4
+; GFX11-NEXT: v_writelane_b32 v77, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s1, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 2
; GFX11-NEXT: s_lshr_b32 s42, s0, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v77, s42, 1
; GFX11-NEXT: s_lshr_b32 s42, s0, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 2
-; GFX11-NEXT: s_lshr_b32 s42, s28, 8
-; GFX11-NEXT: v_writelane_b32 v78, s74, 0
-; GFX11-NEXT: v_writelane_b32 v78, s75, 1
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[4:5], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s99
-; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX11-NEXT: v_writelane_b32 v77, s42, 0
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB95_4
; GFX11-NEXT: .LBB95_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v39, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v38, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s4 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v51, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v50, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v33, 0x200, s21 op_sel_hi:[0,1]
@@ -191346,8 +192000,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s10 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s9 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v53, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v52, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v37, 0x200, s19 op_sel_hi:[0,1]
@@ -191372,8 +192026,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_lshrrev_b64 v[34:35], 24, v[11:12]
; GFX11-NEXT: v_lshrrev_b64 v[65:66], 24, v[13:14]
; GFX11-NEXT: v_lshrrev_b64 v[68:69], 24, v[15:16]
-; GFX11-NEXT: v_lshrrev_b32_e32 v147, 24, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v148, 16, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v148, 24, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v147, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v149, 8, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v150, 16, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v151, 8, v20
@@ -191454,9 +192108,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_lshrrev_b32_e32 v146, 8, v15
; GFX11-NEXT: s_branch .LBB95_5
; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: s_mov_b32 s99, -1
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; kill: killed $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr44
@@ -191465,285 +192118,284 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr58
; GFX11-NEXT: ; implicit-def: $sgpr60
; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr103
; GFX11-NEXT: ; implicit-def: $sgpr102
+; GFX11-NEXT: ; implicit-def: $sgpr103
; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr98
; GFX11-NEXT: ; implicit-def: $sgpr100
+; GFX11-NEXT: ; implicit-def: $sgpr99
; GFX11-NEXT: ; implicit-def: $sgpr97
+; GFX11-NEXT: ; implicit-def: $sgpr98
; GFX11-NEXT: ; implicit-def: $sgpr96
; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr85
; GFX11-NEXT: ; implicit-def: $sgpr86
; GFX11-NEXT: ; implicit-def: $sgpr84
+; GFX11-NEXT: ; implicit-def: $sgpr85
; GFX11-NEXT: ; implicit-def: $sgpr83
; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr80
; GFX11-NEXT: ; implicit-def: $sgpr81
; GFX11-NEXT: ; implicit-def: $sgpr71
+; GFX11-NEXT: ; implicit-def: $sgpr80
; GFX11-NEXT: ; implicit-def: $sgpr70
; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr67
; GFX11-NEXT: ; implicit-def: $sgpr68
; GFX11-NEXT: ; implicit-def: $sgpr66
+; GFX11-NEXT: ; implicit-def: $sgpr67
; GFX11-NEXT: ; implicit-def: $sgpr65
; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr54
; GFX11-NEXT: ; implicit-def: $sgpr55
; GFX11-NEXT: ; implicit-def: $sgpr53
+; GFX11-NEXT: ; implicit-def: $sgpr54
; GFX11-NEXT: ; implicit-def: $sgpr52
; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr49
; GFX11-NEXT: ; implicit-def: $sgpr50
; GFX11-NEXT: ; implicit-def: $sgpr48
+; GFX11-NEXT: ; implicit-def: $sgpr49
; GFX11-NEXT: ; implicit-def: $sgpr39
; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr36
; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr35
+; GFX11-NEXT: ; implicit-def: $sgpr36
; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr72
+; GFX11-NEXT: ; implicit-def: $vcc_hi
; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr72
; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v78, s42, 0
-; GFX11-NEXT: v_writelane_b32 v78, s43, 1
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr78
+; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr90
+; GFX11-NEXT: ; implicit-def: $sgpr92
+; GFX11-NEXT: ; implicit-def: $sgpr94
+; GFX11-NEXT: ; implicit-def: $sgpr30
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; kill: killed $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB95_2
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
+; GFX11-NEXT: s_cbranch_vccz .LBB95_2
; GFX11-NEXT: .LBB95_4:
; GFX11-NEXT: v_dual_mov_b32 v52, s0 :: v_dual_mov_b32 v53, s1
-; GFX11-NEXT: v_readlane_b32 s0, v78, 2
-; GFX11-NEXT: v_mov_b32_e32 v71, s50
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_readlane_b32 s0, v77, 0
+; GFX11-NEXT: v_mov_b32_e32 v71, s49
; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
; GFX11-NEXT: v_dual_mov_b32 v13, s40 :: v_dual_mov_b32 v14, s41
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v74, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 3
+; GFX11-NEXT: v_readlane_b32 s0, v77, 1
; GFX11-NEXT: v_dual_mov_b32 v11, s14 :: v_dual_mov_b32 v12, s15
; GFX11-NEXT: v_dual_mov_b32 v9, s12 :: v_dual_mov_b32 v10, s13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_mov_b32_e32 v73, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 4
-; GFX11-NEXT: v_mov_b32_e32 v55, s48
+; GFX11-NEXT: v_readlane_b32 s0, v77, 2
+; GFX11-NEXT: v_mov_b32_e32 v55, s39
; GFX11-NEXT: v_dual_mov_b32 v7, s10 :: v_dual_mov_b32 v8, s11
; GFX11-NEXT: v_dual_mov_b32 v5, s8 :: v_dual_mov_b32 v6, s9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_mov_b32_e32 v72, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 5
-; GFX11-NEXT: v_mov_b32_e32 v49, s39
-; GFX11-NEXT: v_dual_mov_b32 v3, s6 :: v_dual_mov_b32 v4, s7
-; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
+; GFX11-NEXT: v_readlane_b32 s0, v77, 3
+; GFX11-NEXT: v_mov_b32_e32 v49, s38
+; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v62, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 6
+; GFX11-NEXT: v_readlane_b32 s0, v77, 4
; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v51, s3
; GFX11-NEXT: v_dual_mov_b32 v38, s16 :: v_dual_mov_b32 v39, s17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v63, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 7
-; GFX11-NEXT: v_dual_mov_b32 v35, s38 :: v_dual_mov_b32 v36, s18
+; GFX11-NEXT: v_readlane_b32 s0, v77, 5
+; GFX11-NEXT: v_dual_mov_b32 v35, s37 :: v_dual_mov_b32 v36, s18
; GFX11-NEXT: v_dual_mov_b32 v37, s19 :: v_dual_mov_b32 v32, s20
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_dual_mov_b32 v33, s21 :: v_dual_mov_b32 v60, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 8
+; GFX11-NEXT: v_readlane_b32 s0, v77, 6
; GFX11-NEXT: v_dual_mov_b32 v28, s22 :: v_dual_mov_b32 v29, s23
; GFX11-NEXT: v_dual_mov_b32 v24, s24 :: v_dual_mov_b32 v25, s25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v61, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 9
+; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_dual_mov_b32 v20, s26 :: v_dual_mov_b32 v21, s27
-; GFX11-NEXT: v_dual_mov_b32 v146, s42 :: v_dual_mov_b32 v145, s104
-; GFX11-NEXT: v_mov_b32_e32 v59, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 10
-; GFX11-NEXT: v_dual_mov_b32 v144, vcc_hi :: v_dual_mov_b32 v135, s103
-; GFX11-NEXT: v_dual_mov_b32 v134, s102 :: v_dual_mov_b32 v133, s101
+; GFX11-NEXT: v_dual_mov_b32 v146, s104 :: v_dual_mov_b32 v145, s102
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v59, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 8
+; GFX11-NEXT: v_dual_mov_b32 v144, s103 :: v_dual_mov_b32 v135, s101
+; GFX11-NEXT: v_dual_mov_b32 v134, s100 :: v_dual_mov_b32 v133, s99
; GFX11-NEXT: v_mov_b32_e32 v57, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 11
-; GFX11-NEXT: v_dual_mov_b32 v31, s36 :: v_dual_mov_b32 v132, s98
-; GFX11-NEXT: v_dual_mov_b32 v131, s100 :: v_dual_mov_b32 v130, s97
-; GFX11-NEXT: v_dual_mov_b32 v129, s96 :: v_dual_mov_b32 v58, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 12
-; GFX11-NEXT: v_dual_mov_b32 v27, s37 :: v_dual_mov_b32 v128, s87
-; GFX11-NEXT: v_dual_mov_b32 v119, s85 :: v_dual_mov_b32 v118, s86
+; GFX11-NEXT: v_readlane_b32 s0, v77, 9
+; GFX11-NEXT: v_dual_mov_b32 v31, s35 :: v_dual_mov_b32 v132, s97
+; GFX11-NEXT: v_dual_mov_b32 v131, s98 :: v_dual_mov_b32 v130, s96
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v117, s84 :: v_dual_mov_b32 v56, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 13
-; GFX11-NEXT: v_dual_mov_b32 v116, s83 :: v_dual_mov_b32 v115, s82
-; GFX11-NEXT: v_dual_mov_b32 v114, s80 :: v_dual_mov_b32 v113, s81
-; GFX11-NEXT: v_mov_b32_e32 v47, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 14
-; GFX11-NEXT: v_dual_mov_b32 v23, s35 :: v_dual_mov_b32 v112, s71
-; GFX11-NEXT: v_dual_mov_b32 v103, s70 :: v_dual_mov_b32 v102, s69
+; GFX11-NEXT: v_dual_mov_b32 v129, s87 :: v_dual_mov_b32 v58, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 10
+; GFX11-NEXT: v_dual_mov_b32 v27, s36 :: v_dual_mov_b32 v128, s86
+; GFX11-NEXT: v_dual_mov_b32 v119, s84 :: v_dual_mov_b32 v118, s85
+; GFX11-NEXT: v_dual_mov_b32 v117, s83 :: v_dual_mov_b32 v56, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 11
+; GFX11-NEXT: v_dual_mov_b32 v116, s82 :: v_dual_mov_b32 v115, s81
+; GFX11-NEXT: v_dual_mov_b32 v114, s71 :: v_dual_mov_b32 v113, s80
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v101, s67 :: v_dual_mov_b32 v46, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 15
-; GFX11-NEXT: v_dual_mov_b32 v19, s34 :: v_dual_mov_b32 v100, s68
-; GFX11-NEXT: v_dual_mov_b32 v99, s66 :: v_dual_mov_b32 v98, s65
-; GFX11-NEXT: v_dual_mov_b32 v97, s64 :: v_dual_mov_b32 v44, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 16
-; GFX11-NEXT: v_dual_mov_b32 v96, s54 :: v_dual_mov_b32 v87, s55
-; GFX11-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v85, s52
+; GFX11-NEXT: v_mov_b32_e32 v47, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 12
+; GFX11-NEXT: v_dual_mov_b32 v23, s34 :: v_dual_mov_b32 v112, s70
+; GFX11-NEXT: v_dual_mov_b32 v103, s69 :: v_dual_mov_b32 v102, s68
+; GFX11-NEXT: v_dual_mov_b32 v101, s66 :: v_dual_mov_b32 v46, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 13
+; GFX11-NEXT: v_dual_mov_b32 v19, vcc_hi :: v_dual_mov_b32 v100, s67
+; GFX11-NEXT: v_dual_mov_b32 v99, s65 :: v_dual_mov_b32 v98, s64
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_mov_b32 v97, s55 :: v_dual_mov_b32 v44, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 14
+; GFX11-NEXT: v_dual_mov_b32 v96, s53 :: v_dual_mov_b32 v87, s54
+; GFX11-NEXT: v_dual_mov_b32 v86, s52 :: v_dual_mov_b32 v85, s51
; GFX11-NEXT: v_mov_b32_e32 v45, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 17
-; GFX11-NEXT: v_dual_mov_b32 v84, s51 :: v_dual_mov_b32 v83, s49
-; GFX11-NEXT: v_dual_mov_b32 v147, s43 :: v_dual_mov_b32 v22, s78
-; GFX11-NEXT: v_mov_b32_e32 v43, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 18
-; GFX11-NEXT: v_dual_mov_b32 v67, s58 :: v_dual_mov_b32 v26, s88
-; GFX11-NEXT: v_dual_mov_b32 v81, s44 :: v_dual_mov_b32 v30, s90
+; GFX11-NEXT: v_readlane_b32 s0, v77, 15
+; GFX11-NEXT: v_dual_mov_b32 v84, s50 :: v_dual_mov_b32 v83, s48
+; GFX11-NEXT: v_dual_mov_b32 v67, s58 :: v_dual_mov_b32 v26, s90
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v43, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 16
+; GFX11-NEXT: v_dual_mov_b32 v81, s44 :: v_dual_mov_b32 v30, s88
+; GFX11-NEXT: v_dual_mov_b32 v17, s30 :: v_dual_mov_b32 v34, s78
; GFX11-NEXT: v_mov_b32_e32 v42, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 19
-; GFX11-NEXT: v_dual_mov_b32 v17, s74 :: v_dual_mov_b32 v34, s92
-; GFX11-NEXT: v_dual_mov_b32 v65, s94 :: v_dual_mov_b32 v68, s30
-; GFX11-NEXT: v_mov_b32_e32 v41, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 20
-; GFX11-NEXT: v_mov_b32_e32 v48, s62
-; GFX11-NEXT: v_mov_b32_e32 v54, s72
+; GFX11-NEXT: v_readlane_b32 s0, v77, 17
+; GFX11-NEXT: v_dual_mov_b32 v65, s76 :: v_dual_mov_b32 v68, s74
+; GFX11-NEXT: v_mov_b32_e32 v48, s72
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_mov_b32 v54, s62 :: v_dual_mov_b32 v41, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 18
; GFX11-NEXT: v_mov_b32_e32 v64, s60
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v70, s56 :: v_dual_mov_b32 v183, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 21
+; GFX11-NEXT: v_mov_b32_e32 v70, s56
; GFX11-NEXT: v_mov_b32_e32 v80, s46
-; GFX11-NEXT: v_mov_b32_e32 v18, s76
+; GFX11-NEXT: v_dual_mov_b32 v82, s42 :: v_dual_mov_b32 v183, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 19
+; GFX11-NEXT: v_mov_b32_e32 v18, s94
+; GFX11-NEXT: v_mov_b32_e32 v22, s92
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v40, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_readlane_b32 s0, v77, 20
; GFX11-NEXT: v_mov_b32_e32 v182, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 23
-; GFX11-NEXT: v_mov_b32_e32 v181, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 24
+; GFX11-NEXT: v_readlane_b32 s0, v77, 21
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v181, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 22
; GFX11-NEXT: v_mov_b32_e32 v180, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 25
-; GFX11-NEXT: v_mov_b32_e32 v178, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 26
+; GFX11-NEXT: v_readlane_b32 s0, v77, 23
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v178, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 24
; GFX11-NEXT: v_mov_b32_e32 v179, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 27
-; GFX11-NEXT: v_mov_b32_e32 v177, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 28
+; GFX11-NEXT: v_readlane_b32 s0, v77, 25
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v177, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 26
; GFX11-NEXT: v_mov_b32_e32 v176, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 29
-; GFX11-NEXT: v_mov_b32_e32 v167, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 30
+; GFX11-NEXT: v_readlane_b32 s0, v77, 27
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v167, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 28
; GFX11-NEXT: v_mov_b32_e32 v165, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 31
-; GFX11-NEXT: v_mov_b32_e32 v166, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 29
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v166, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 30
; GFX11-NEXT: v_mov_b32_e32 v164, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 1
-; GFX11-NEXT: v_mov_b32_e32 v163, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 2
+; GFX11-NEXT: v_readlane_b32 s0, v77, 31
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v163, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 0
; GFX11-NEXT: v_mov_b32_e32 v162, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 3
-; GFX11-NEXT: v_mov_b32_e32 v160, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 4
+; GFX11-NEXT: v_readlane_b32 s0, v78, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v160, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 2
; GFX11-NEXT: v_mov_b32_e32 v161, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 5
-; GFX11-NEXT: v_mov_b32_e32 v151, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 6
+; GFX11-NEXT: v_readlane_b32 s0, v78, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v151, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 4
; GFX11-NEXT: v_mov_b32_e32 v150, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 7
+; GFX11-NEXT: v_readlane_b32 s0, v78, 5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v149, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_readlane_b32 s0, v78, 6
+; GFX11-NEXT: v_mov_b32_e32 v147, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v148, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 0
-; GFX11-NEXT: v_readlane_b32 s1, v78, 1
-; GFX11-NEXT: v_mov_b32_e32 v82, s0
; GFX11-NEXT: .LBB95_5: ; %end
; GFX11-NEXT: v_lshlrev_b32_e32 v69, 8, v74
; GFX11-NEXT: v_and_b32_e32 v52, 0xff, v52
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT: v_lshlrev_b32_e32 v66, 8, v82
; GFX11-NEXT: v_and_b32_e32 v53, 0xff, v53
; GFX11-NEXT: v_lshlrev_b32_e32 v82, 8, v63
@@ -191872,8 +192524,8 @@ define inreg <128 x i8> @bitcast_v64f16_to_v128i8_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v48
; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v21
; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v149
-; GFX11-NEXT: v_and_b32_e32 v50, 0xff, v148
-; GFX11-NEXT: v_lshlrev_b32_e32 v51, 8, v147
+; GFX11-NEXT: v_and_b32_e32 v50, 0xff, v147
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 8, v148
; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
; GFX11-NEXT: v_lshlrev_b32_e32 v52, 8, v146
; GFX11-NEXT: v_and_b32_e32 v53, 0xff, v145
@@ -198476,8 +199128,8 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_or_saveexec_b64 s[4:5], -1
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
@@ -198493,48 +199145,40 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:332
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:328
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:324
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:332
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:328
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:324
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:320
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:308
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:304
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:300
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:296
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:292
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:288
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:276
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:272
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:268
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:308
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:304
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:300
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:296
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:292
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:288
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:276
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:272
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:268
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:264
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:260
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:256
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:260
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:256
; SI-NEXT: s_waitcnt expcnt(6)
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:244
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:244
; SI-NEXT: s_waitcnt expcnt(5)
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:240
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:236
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:232
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:228
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:224
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:212
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:208
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:204
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:200
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:196
-; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:240
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:236
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:232
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:228
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:224
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:212
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:208
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:204
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:200
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:196
; SI-NEXT: v_writelane_b32 v63, s30, 0
-; SI-NEXT: v_writelane_b32 v62, s28, 0
-; SI-NEXT: v_writelane_b32 v62, s25, 1
-; SI-NEXT: v_writelane_b32 v62, s24, 2
-; SI-NEXT: v_writelane_b32 v62, s23, 3
-; SI-NEXT: v_writelane_b32 v62, s22, 4
-; SI-NEXT: v_writelane_b32 v62, s21, 5
-; SI-NEXT: v_writelane_b32 v62, s18, 6
-; SI-NEXT: v_writelane_b32 v62, s16, 7
; SI-NEXT: v_writelane_b32 v63, s31, 1
; SI-NEXT: v_writelane_b32 v63, s34, 2
; SI-NEXT: v_writelane_b32 v63, s35, 3
@@ -198560,590 +199204,617 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_writelane_b32 v63, s71, 23
; SI-NEXT: v_writelane_b32 v63, s80, 24
; SI-NEXT: v_writelane_b32 v63, s81, 25
+; SI-NEXT: ; implicit-def: $vgpr62 : SGPR spill to VGPR lane
; SI-NEXT: v_writelane_b32 v63, s82, 26
+; SI-NEXT: v_writelane_b32 v62, s28, 0
+; SI-NEXT: v_writelane_b32 v62, s27, 1
+; SI-NEXT: v_writelane_b32 v62, s26, 2
+; SI-NEXT: v_writelane_b32 v62, s25, 3
+; SI-NEXT: v_writelane_b32 v62, s24, 4
+; SI-NEXT: v_writelane_b32 v62, s23, 5
+; SI-NEXT: v_writelane_b32 v62, s22, 6
+; SI-NEXT: v_writelane_b32 v62, s21, 7
+; SI-NEXT: v_writelane_b32 v62, s20, 8
; SI-NEXT: v_writelane_b32 v63, s83, 27
+; SI-NEXT: v_writelane_b32 v62, s19, 9
; SI-NEXT: v_writelane_b32 v63, s84, 28
+; SI-NEXT: v_writelane_b32 v62, s18, 10
; SI-NEXT: v_writelane_b32 v63, s85, 29
+; SI-NEXT: v_writelane_b32 v62, s16, 11
; SI-NEXT: v_writelane_b32 v63, s86, 30
; SI-NEXT: v_writelane_b32 v63, s87, 31
; SI-NEXT: v_writelane_b32 v63, s96, 32
; SI-NEXT: v_writelane_b32 v63, s97, 33
; SI-NEXT: v_writelane_b32 v63, s98, 34
-; SI-NEXT: v_writelane_b32 v63, s99, 35
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_mov_b32_e32 v30, v28
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_mov_b32_e32 v29, v26
-; SI-NEXT: v_readfirstlane_b32 s15, v16
-; SI-NEXT: v_readfirstlane_b32 s18, v25
-; SI-NEXT: v_readfirstlane_b32 s43, v15
-; SI-NEXT: v_readfirstlane_b32 s42, v24
-; SI-NEXT: v_readfirstlane_b32 s44, v23
-; SI-NEXT: v_readfirstlane_b32 s49, v12
-; SI-NEXT: v_readfirstlane_b32 s8, v11
-; SI-NEXT: v_readfirstlane_b32 s53, v20
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v21, v5
+; SI-NEXT: v_readfirstlane_b32 s44, v25
+; SI-NEXT: v_readfirstlane_b32 s15, v15
+; SI-NEXT: v_readfirstlane_b32 s42, v11
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s4, v34
-; SI-NEXT: v_writelane_b32 v62, s4, 8
-; SI-NEXT: v_readfirstlane_b32 s4, v38
-; SI-NEXT: v_writelane_b32 v62, s4, 9
-; SI-NEXT: v_readfirstlane_b32 s4, v49
-; SI-NEXT: v_writelane_b32 v62, s4, 10
-; SI-NEXT: v_readfirstlane_b32 s4, v50
-; SI-NEXT: v_writelane_b32 v62, s4, 11
-; SI-NEXT: v_readfirstlane_b32 s79, v52
-; SI-NEXT: v_readfirstlane_b32 s88, v54
-; SI-NEXT: v_readfirstlane_b32 s4, v55
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:192
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:180
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:176
-; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:172
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:168
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:164
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:160
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:148
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:144
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:140
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:136
+; SI-NEXT: v_readfirstlane_b32 s52, v35
+; SI-NEXT: v_readfirstlane_b32 s53, v36
+; SI-NEXT: v_readfirstlane_b32 s68, v37
+; SI-NEXT: v_readfirstlane_b32 s81, v50
+; SI-NEXT: v_readfirstlane_b32 s95, v51
+; SI-NEXT: v_readfirstlane_b32 s88, v52
+; SI-NEXT: v_readfirstlane_b32 s78, v54
+; SI-NEXT: v_readfirstlane_b32 s4, v40
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:192
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:180
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:176
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:172
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:168
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:164
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:160
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:148
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:144
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:140
; SI-NEXT: v_writelane_b32 v62, s4, 12
-; SI-NEXT: v_readfirstlane_b32 s77, v41
-; SI-NEXT: v_readfirstlane_b32 s4, v42
-; SI-NEXT: v_readfirstlane_b32 s94, v31
-; SI-NEXT: v_readfirstlane_b32 s70, v32
-; SI-NEXT: v_readfirstlane_b32 s51, v33
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s37, v45
-; SI-NEXT: v_readfirstlane_b32 s24, v56
-; SI-NEXT: v_readfirstlane_b32 s7, v57
-; SI-NEXT: v_readfirstlane_b32 s92, v58
-; SI-NEXT: v_readfirstlane_b32 s28, v59
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:132
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:128
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:116
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:112
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:108
-; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:104
-; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:100
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:96
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:84
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:80
-; SI-NEXT: v_readfirstlane_b32 s35, v43
-; SI-NEXT: v_readfirstlane_b32 s55, v46
-; SI-NEXT: v_readfirstlane_b32 s68, v35
-; SI-NEXT: v_readfirstlane_b32 s87, v37
-; SI-NEXT: v_readfirstlane_b32 s67, v39
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:76
+; SI-NEXT: v_readfirstlane_b32 s4, v41
+; SI-NEXT: v_writelane_b32 v62, s4, 13
+; SI-NEXT: v_readfirstlane_b32 s4, v32
+; SI-NEXT: v_writelane_b32 v62, s4, 14
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s74, v53
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:64
+; SI-NEXT: v_readfirstlane_b32 s4, v45
+; SI-NEXT: v_readfirstlane_b32 s97, v31
+; SI-NEXT: v_readfirstlane_b32 s64, v33
+; SI-NEXT: v_readfirstlane_b32 s31, v43
+; SI-NEXT: v_writelane_b32 v62, s4, 15
+; SI-NEXT: v_readfirstlane_b32 s34, v56
+; SI-NEXT: v_readfirstlane_b32 s4, v58
+; SI-NEXT: v_readfirstlane_b32 s55, v59
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:136
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:132
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:128
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:116
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:112
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:108
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:104
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:100
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:96
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:84
+; SI-NEXT: v_readfirstlane_b32 s7, v34
+; SI-NEXT: v_readfirstlane_b32 s92, v39
; SI-NEXT: v_readfirstlane_b32 s85, v48
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:52
-; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:48
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:44
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:40
-; SI-NEXT: v_writelane_b32 v62, s4, 13
-; SI-NEXT: v_readfirstlane_b32 s98, v40
-; SI-NEXT: v_readfirstlane_b32 s69, v51
-; SI-NEXT: v_readfirstlane_b32 s21, v36
-; SI-NEXT: v_readfirstlane_b32 s40, v19
-; SI-NEXT: v_readfirstlane_b32 s23, v28
-; SI-NEXT: v_readfirstlane_b32 s34, v27
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v21, v13
-; SI-NEXT: v_mov_b32_e32 v13, v5
-; SI-NEXT: v_readfirstlane_b32 s97, v29
-; SI-NEXT: v_readfirstlane_b32 s80, v18
-; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v6
-; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v14
+; SI-NEXT: v_readfirstlane_b32 s20, v53
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:80
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:316
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_readfirstlane_b32 s18, v46
+; SI-NEXT: v_readfirstlane_b32 s23, v55
+; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:72
+; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:68
+; SI-NEXT: v_readfirstlane_b32 s6, v49
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:64
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:48
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:44
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:40
+; SI-NEXT: v_writelane_b32 v62, s4, 16
+; SI-NEXT: v_readfirstlane_b32 s4, v60
+; SI-NEXT: v_readfirstlane_b32 s90, v42
+; SI-NEXT: v_writelane_b32 v62, s4, 17
+; SI-NEXT: v_readfirstlane_b32 s76, v38
+; SI-NEXT: v_readfirstlane_b32 s49, v30
+; SI-NEXT: v_readfirstlane_b32 s62, v27
+; SI-NEXT: v_writelane_b32 v63, s99, 35
+; SI-NEXT: v_readfirstlane_b32 s96, v29
+; SI-NEXT: v_readfirstlane_b32 s89, v24
+; SI-NEXT: v_readfirstlane_b32 s38, v23
+; SI-NEXT: v_readfirstlane_b32 s39, v20
+; SI-NEXT: v_readfirstlane_b32 s69, v19
+; SI-NEXT: v_readfirstlane_b32 s70, v18
; SI-NEXT: v_lshlrev_b32_e32 v18, 24, v22
-; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v30
-; SI-NEXT: v_readfirstlane_b32 s96, v17
-; SI-NEXT: v_readfirstlane_b32 s64, v9
-; SI-NEXT: v_readfirstlane_b32 s25, v8
+; SI-NEXT: v_readfirstlane_b32 s86, v17
+; SI-NEXT: v_readfirstlane_b32 s99, v16
+; SI-NEXT: v_readfirstlane_b32 s25, v12
+; SI-NEXT: v_readfirstlane_b32 s65, v10
+; SI-NEXT: v_readfirstlane_b32 s74, v9
+; SI-NEXT: v_readfirstlane_b32 s77, v8
; SI-NEXT: v_readfirstlane_b32 s83, v7
; SI-NEXT: v_readfirstlane_b32 s84, v4
-; SI-NEXT: v_readfirstlane_b32 s93, v3
-; SI-NEXT: v_readfirstlane_b32 s76, v1
-; SI-NEXT: v_readfirstlane_b32 s58, v38
-; SI-NEXT: v_readfirstlane_b32 s65, v49
-; SI-NEXT: v_readfirstlane_b32 s62, v54
-; SI-NEXT: v_readfirstlane_b32 s81, v44
-; SI-NEXT: v_readfirstlane_b32 s71, v47
-; SI-NEXT: v_readfirstlane_b32 s38, v60
-; SI-NEXT: v_readfirstlane_b32 s86, v61
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:220
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s90, v50
-; SI-NEXT: v_readfirstlane_b32 s31, v52
-; SI-NEXT: v_readfirstlane_b32 s4, v55
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:36
+; SI-NEXT: v_readfirstlane_b32 s36, v3
+; SI-NEXT: v_readfirstlane_b32 s30, v37
+; SI-NEXT: v_readfirstlane_b32 s67, v50
+; SI-NEXT: v_readfirstlane_b32 s8, v51
+; SI-NEXT: v_readfirstlane_b32 s80, v40
+; SI-NEXT: v_readfirstlane_b32 s71, v44
+; SI-NEXT: v_readfirstlane_b32 s82, v47
+; SI-NEXT: v_readfirstlane_b32 s19, v57
+; SI-NEXT: v_readfirstlane_b32 s79, v61
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:36
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:32
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:336
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32
-; SI-NEXT: v_readfirstlane_b32 s72, v31
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:316
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:72
-; SI-NEXT: v_readfirstlane_b32 s82, v56
-; SI-NEXT: v_readfirstlane_b32 s95, v57
+; SI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:336
+; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s39, v58
-; SI-NEXT: v_readfirstlane_b32 s56, v59
-; SI-NEXT: v_readfirstlane_b32 s57, v41
-; SI-NEXT: v_readfirstlane_b32 s36, v42
-; SI-NEXT: v_readfirstlane_b32 s73, v45
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:284
-; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:252
-; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:188
-; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:124
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:28
+; SI-NEXT: v_readfirstlane_b32 s24, v35
+; SI-NEXT: v_readfirstlane_b32 s4, v36
+; SI-NEXT: v_writelane_b32 v62, s4, 18
+; SI-NEXT: v_readfirstlane_b32 s57, v31
+; SI-NEXT: v_readfirstlane_b32 s58, v32
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:220
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:76
+; SI-NEXT: v_readfirstlane_b32 s35, v58
+; SI-NEXT: v_readfirstlane_b32 s94, v59
+; SI-NEXT: v_readfirstlane_b32 s22, v52
+; SI-NEXT: v_readfirstlane_b32 s75, v54
+; SI-NEXT: v_readfirstlane_b32 s87, v41
+; SI-NEXT: v_readfirstlane_b32 s21, v43
+; SI-NEXT: v_readfirstlane_b32 s37, v45
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:284
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:252
+; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:188
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:124
+; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:92
-; SI-NEXT: v_readfirstlane_b32 s16, v34
-; SI-NEXT: v_readfirstlane_b32 s48, v32
-; SI-NEXT: v_readfirstlane_b32 s52, v33
-; SI-NEXT: v_writelane_b32 v62, s4, 14
-; SI-NEXT: v_readfirstlane_b32 s47, v35
-; SI-NEXT: v_readfirstlane_b32 s60, v37
-; SI-NEXT: v_readfirstlane_b32 s61, v39
-; SI-NEXT: v_readfirstlane_b32 s89, v43
+; SI-NEXT: v_readfirstlane_b32 s59, v33
+; SI-NEXT: v_readfirstlane_b32 s66, v34
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s99, v46
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:312
+; SI-NEXT: v_readfirstlane_b32 s56, v49
+; SI-NEXT: v_readfirstlane_b32 s28, v39
+; SI-NEXT: v_readfirstlane_b32 s47, v48
+; SI-NEXT: v_readfirstlane_b32 s16, v46
+; SI-NEXT: v_readfirstlane_b32 s98, v56
+; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:312
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:280
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:248
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:216
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:184
+; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:248
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:216
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:184
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:152
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:120
-; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:88
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:56
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24
-; SI-NEXT: v_readfirstlane_b32 s54, v48
-; SI-NEXT: v_readfirstlane_b32 s50, v53
-; SI-NEXT: v_readfirstlane_b32 s78, v49
-; SI-NEXT: v_readfirstlane_b32 s30, v51
-; SI-NEXT: v_readfirstlane_b32 s66, v54
-; SI-NEXT: v_readfirstlane_b32 s91, v40
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:120
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:88
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:56
+; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:24
+; SI-NEXT: v_readfirstlane_b32 s40, v53
+; SI-NEXT: v_readfirstlane_b32 s93, v55
+; SI-NEXT: v_readfirstlane_b32 s60, v50
+; SI-NEXT: v_readfirstlane_b32 s61, v51
+; SI-NEXT: v_readfirstlane_b32 s73, v40
+; SI-NEXT: v_readfirstlane_b32 s50, v42
+; SI-NEXT: v_readfirstlane_b32 s45, v44
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_readfirstlane_b32 s6, v44
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v50
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v47
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s4, v10
-; SI-NEXT: v_writelane_b32 v62, s4, 15
+; SI-NEXT: v_readfirstlane_b32 s91, v60
+; SI-NEXT: v_lshlrev_b32_e32 v60, 24, v6
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; SI-NEXT: v_readfirstlane_b32 s4, v2
-; SI-NEXT: v_writelane_b32 v62, s4, 16
-; SI-NEXT: v_writelane_b32 v62, s17, 17
-; SI-NEXT: v_writelane_b32 v62, s15, 18
-; SI-NEXT: v_writelane_b32 v62, s18, 19
-; SI-NEXT: v_writelane_b32 v62, s43, 20
-; SI-NEXT: v_writelane_b32 v62, s42, 21
+; SI-NEXT: v_writelane_b32 v62, s4, 19
+; SI-NEXT: v_readfirstlane_b32 s4, v1
+; SI-NEXT: v_writelane_b32 v62, s4, 20
+; SI-NEXT: v_writelane_b32 v62, s17, 21
; SI-NEXT: v_writelane_b32 v62, s44, 22
-; SI-NEXT: v_writelane_b32 v62, s16, 23
-; SI-NEXT: v_writelane_b32 v62, s49, 24
-; SI-NEXT: v_writelane_b32 v62, s8, 25
-; SI-NEXT: v_writelane_b32 v62, s6, 26
-; SI-NEXT: v_readfirstlane_b32 s45, v52
-; SI-NEXT: v_writelane_b32 v62, s56, 27
-; SI-NEXT: v_writelane_b32 v62, s45, 28
-; SI-NEXT: v_writelane_b32 v62, s53, 29
-; SI-NEXT: v_writelane_b32 v62, s94, 30
-; SI-NEXT: v_writelane_b32 v62, s57, 31
-; SI-NEXT: v_writelane_b32 v62, s58, 32
-; SI-NEXT: v_writelane_b32 v62, s47, 33
-; SI-NEXT: v_readfirstlane_b32 s46, v55
-; SI-NEXT: v_writelane_b32 v62, s40, 34
-; SI-NEXT: v_readfirstlane_b32 s59, v47
-; SI-NEXT: v_writelane_b32 v62, s46, 35
-; SI-NEXT: v_writelane_b32 v62, s59, 36
-; SI-NEXT: v_writelane_b32 v62, s60, 37
-; SI-NEXT: v_writelane_b32 v62, s36, 38
-; SI-NEXT: v_writelane_b32 v62, s65, 39
-; SI-NEXT: v_writelane_b32 v62, s61, 40
-; SI-NEXT: v_writelane_b32 v62, s73, 41
-; SI-NEXT: v_writelane_b32 v62, s62, 42
-; SI-NEXT: v_writelane_b32 v62, s72, 43
-; SI-NEXT: v_writelane_b32 v62, s23, 44
-; SI-NEXT: v_writelane_b32 v62, s48, 45
-; SI-NEXT: v_writelane_b32 v62, s34, 46
-; SI-NEXT: v_writelane_b32 v62, s78, 47
-; SI-NEXT: v_writelane_b32 v62, s30, 48
-; SI-NEXT: v_writelane_b32 v62, s54, 49
-; SI-NEXT: v_writelane_b32 v62, s50, 50
-; SI-NEXT: v_writelane_b32 v62, s52, 51
-; SI-NEXT: v_writelane_b32 v62, s82, 52
-; SI-NEXT: v_writelane_b32 v62, s66, 53
-; SI-NEXT: v_readfirstlane_b32 s22, v36
+; SI-NEXT: v_writelane_b32 v62, s15, 23
+; SI-NEXT: v_writelane_b32 v62, s16, 24
+; SI-NEXT: v_writelane_b32 v62, s42, 25
+; SI-NEXT: v_readfirstlane_b32 s46, v57
+; SI-NEXT: v_writelane_b32 v62, s45, 26
+; SI-NEXT: v_writelane_b32 v62, s46, 27
+; SI-NEXT: v_writelane_b32 v62, s57, 28
+; SI-NEXT: v_writelane_b32 v62, s47, 29
+; SI-NEXT: v_writelane_b32 v62, s8, 30
+; SI-NEXT: v_writelane_b32 v62, s58, 31
+; SI-NEXT: v_writelane_b32 v62, s56, 32
+; SI-NEXT: v_writelane_b32 v62, s49, 33
+; SI-NEXT: v_writelane_b32 v62, s59, 34
+; SI-NEXT: v_writelane_b32 v62, s62, 35
+; SI-NEXT: v_writelane_b32 v62, s60, 36
+; SI-NEXT: v_writelane_b32 v62, s61, 37
+; SI-NEXT: v_writelane_b32 v62, s40, 38
+; SI-NEXT: v_writelane_b32 v62, s35, 39
+; SI-NEXT: v_writelane_b32 v62, s93, 40
+; SI-NEXT: v_writelane_b32 v62, s94, 41
+; SI-NEXT: v_writelane_b32 v62, s73, 42
+; SI-NEXT: v_writelane_b32 v62, s50, 43
+; SI-NEXT: v_readfirstlane_b32 s54, v38
+; SI-NEXT: v_readfirstlane_b32 s26, v61
+; SI-NEXT: v_lshlrev_b32_e32 v61, 24, v14
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_lshlrev_b32_e32 v22, 24, v57
+; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v58
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v58
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_lshlrev_b32_e32 v59, 24, v59
-; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v56
-; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v60
-; SI-NEXT: v_lshlrev_b32_e32 v45, 24, v45
-; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v61
-; SI-NEXT: v_lshlrev_b32_e32 v56, 24, v42
-; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v41
-; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v31
-; SI-NEXT: v_writelane_b32 v62, s91, 54
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v59
+; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v43
+; SI-NEXT: v_lshlrev_b32_e32 v43, 24, v31
+; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v41
+; SI-NEXT: v_lshlrev_b32_e32 v47, 24, v54
+; SI-NEXT: v_lshlrev_b32_e32 v57, 24, v52
+; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v5
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: v_writelane_b32 v62, s66, 44
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v6
+; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v45
+; SI-NEXT: v_lshlrev_b32_e32 v45, 24, v32
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB97_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v5, v13
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v13
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; SI-NEXT: v_readlane_b32 s5, v62, 5
-; SI-NEXT: s_and_b32 s4, s20, 0xff
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
+; SI-NEXT: v_readlane_b32 s4, v62, 8
+; SI-NEXT: v_readlane_b32 s5, v62, 7
+; SI-NEXT: s_and_b32 s4, s4, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: v_writelane_b32 v62, s4, 55
-; SI-NEXT: v_readlane_b32 s4, v62, 4
+; SI-NEXT: v_writelane_b32 v62, s4, 45
+; SI-NEXT: v_readlane_b32 s4, v62, 6
; SI-NEXT: s_and_b32 s4, s4, 0xff
-; SI-NEXT: v_readlane_b32 s5, v62, 3
+; SI-NEXT: v_readlane_b32 s5, v62, 5
; SI-NEXT: s_lshl_b32 s4, s4, 16
; SI-NEXT: s_lshl_b32 s5, s5, 24
; SI-NEXT: s_or_b32 s63, s5, s4
-; SI-NEXT: v_readlane_b32 s4, v62, 6
+; SI-NEXT: v_readlane_b32 s4, v62, 10
; SI-NEXT: s_and_b32 s5, s4, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 9
+; SI-NEXT: v_writelane_b32 v62, s24, 46
; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s9, s19, 24
+; SI-NEXT: s_lshl_b32 s9, s4, 24
; SI-NEXT: v_readlane_b32 s4, v62, 0
; SI-NEXT: s_or_b32 s9, s9, s5
; SI-NEXT: s_and_b32 s5, s4, 0xff
; SI-NEXT: s_lshl_b32 s10, s29, 8
; SI-NEXT: s_or_b32 s4, s5, s10
-; SI-NEXT: v_writelane_b32 v62, s4, 56
-; SI-NEXT: s_and_b32 s5, s76, 0xff
-; SI-NEXT: v_readlane_b32 s10, v62, 16
+; SI-NEXT: v_writelane_b32 v62, s4, 47
+; SI-NEXT: v_readlane_b32 s4, v62, 20
+; SI-NEXT: s_and_b32 s5, s4, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 19
; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s11, s10, 24
+; SI-NEXT: s_lshl_b32 s11, s4, 24
+; SI-NEXT: v_readlane_b32 s4, v62, 2
; SI-NEXT: s_or_b32 s5, s11, s5
-; SI-NEXT: s_and_b32 s11, s26, 0xff
+; SI-NEXT: s_and_b32 s11, s4, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 1
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s12, s27, 24
+; SI-NEXT: s_lshl_b32 s12, s4, 24
; SI-NEXT: s_or_b32 s14, s12, s11
; SI-NEXT: s_and_b32 s11, s83, 0xff
-; SI-NEXT: s_lshl_b32 s12, s25, 8
-; SI-NEXT: s_or_b32 s10, s11, s12
-; SI-NEXT: v_writelane_b32 v62, s10, 57
-; SI-NEXT: s_and_b32 s11, s64, 0xff
-; SI-NEXT: v_readlane_b32 s10, v62, 15
+; SI-NEXT: s_lshl_b32 s12, s77, 8
+; SI-NEXT: s_or_b32 s4, s11, s12
+; SI-NEXT: s_and_b32 s11, s74, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s13, s10, 24
+; SI-NEXT: s_lshl_b32 s13, s65, 24
; SI-NEXT: s_or_b32 s41, s13, s11
-; SI-NEXT: s_and_b32 s11, s43, 0xff
-; SI-NEXT: s_lshl_b32 s13, s15, 8
-; SI-NEXT: s_or_b32 s10, s11, s13
-; SI-NEXT: s_and_b32 s11, s96, 0xff
+; SI-NEXT: s_and_b32 s11, s15, 0xff
+; SI-NEXT: s_lshl_b32 s13, s99, 8
+; SI-NEXT: v_writelane_b32 v62, s4, 48
+; SI-NEXT: s_or_b32 s4, s11, s13
+; SI-NEXT: s_and_b32 s11, s86, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s80, 24
+; SI-NEXT: s_lshl_b32 s15, s70, 24
; SI-NEXT: s_or_b32 s43, s15, s11
-; SI-NEXT: s_and_b32 s11, s44, 0xff
-; SI-NEXT: s_lshl_b32 s15, s42, 8
+; SI-NEXT: s_and_b32 s11, s38, 0xff
+; SI-NEXT: s_lshl_b32 s15, s89, 8
; SI-NEXT: s_or_b32 s13, s11, s15
-; SI-NEXT: s_and_b32 s11, s18, 0xff
+; SI-NEXT: s_and_b32 s11, s44, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s97, 24
+; SI-NEXT: s_lshl_b32 s15, s96, 24
; SI-NEXT: s_or_b32 s44, s15, s11
-; SI-NEXT: s_and_b32 s11, s59, 0xff
-; SI-NEXT: s_lshl_b32 s15, s46, 8
+; SI-NEXT: s_and_b32 s11, s26, 0xff
+; SI-NEXT: s_lshl_b32 s15, s91, 8
; SI-NEXT: s_or_b32 s12, s11, s15
-; SI-NEXT: s_and_b32 s11, s45, 0xff
+; SI-NEXT: s_and_b32 s11, s46, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s6, 24
+; SI-NEXT: s_lshl_b32 s15, s45, 24
; SI-NEXT: s_or_b32 s45, s15, s11
-; SI-NEXT: s_and_b32 s11, s30, 0xff
-; SI-NEXT: s_lshl_b32 s15, s78, 8
-; SI-NEXT: v_writelane_b32 v62, s10, 58
+; SI-NEXT: s_and_b32 s11, s61, 0xff
+; SI-NEXT: s_lshl_b32 s15, s60, 8
; SI-NEXT: s_or_b32 s10, s11, s15
-; SI-NEXT: s_and_b32 s11, s99, 0xff
+; SI-NEXT: s_and_b32 s11, s98, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s89, 24
+; SI-NEXT: s_lshl_b32 s15, s16, 24
; SI-NEXT: s_or_b32 s46, s15, s11
-; SI-NEXT: s_and_b32 s11, s61, 0xff
-; SI-NEXT: s_lshl_b32 s15, s60, 8
-; SI-NEXT: s_or_b32 s6, s11, s15
-; SI-NEXT: s_and_b32 s11, s22, 0xff
+; SI-NEXT: s_and_b32 s11, s56, 0xff
+; SI-NEXT: s_lshl_b32 s15, s47, 8
+; SI-NEXT: v_writelane_b32 v62, s4, 49
+; SI-NEXT: s_or_b32 s4, s11, s15
+; SI-NEXT: s_and_b32 s11, s28, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s47, 24
+; SI-NEXT: s_lshl_b32 s15, s54, 24
; SI-NEXT: s_or_b32 s47, s15, s11
-; SI-NEXT: s_and_b32 s11, s57, 0xff
-; SI-NEXT: s_lshl_b32 s15, s56, 8
-; SI-NEXT: v_writelane_b32 v62, s6, 59
-; SI-NEXT: s_or_b32 s6, s11, s15
-; SI-NEXT: s_and_b32 s11, s39, 0xff
-; SI-NEXT: v_writelane_b32 v62, s6, 60
+; SI-NEXT: s_and_b32 s11, s21, 0xff
+; SI-NEXT: s_lshl_b32 s15, s87, 8
+; SI-NEXT: s_or_b32 s16, s11, s15
+; SI-NEXT: s_and_b32 s11, s75, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s95, 24
+; SI-NEXT: s_lshl_b32 s15, s22, 24
+; SI-NEXT: v_writelane_b32 v62, s4, 50
; SI-NEXT: s_or_b32 s56, s15, s11
-; SI-NEXT: s_and_b32 s11, s48, 0xff
-; SI-NEXT: s_lshl_b32 s15, s72, 8
-; SI-NEXT: v_readlane_b32 s6, v62, 14
-; SI-NEXT: s_or_b32 s48, s11, s15
-; SI-NEXT: s_and_b32 s11, s6, 0xff
+; SI-NEXT: s_and_b32 s11, s59, 0xff
+; SI-NEXT: s_lshl_b32 s15, s58, 8
+; SI-NEXT: s_or_b32 s51, s11, s15
+; SI-NEXT: s_and_b32 s11, s57, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 18
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s31, 24
+; SI-NEXT: s_lshl_b32 s15, s4, 24
; SI-NEXT: s_or_b32 vcc_lo, s15, s11
-; SI-NEXT: s_and_b32 s11, s86, 0xff
-; SI-NEXT: s_lshl_b32 s15, s38, 8
+; SI-NEXT: s_and_b32 s11, s19, 0xff
+; SI-NEXT: s_lshl_b32 s15, s82, 8
; SI-NEXT: s_or_b32 s72, s11, s15
; SI-NEXT: s_and_b32 s11, s71, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s81, 24
+; SI-NEXT: s_lshl_b32 s15, s80, 24
; SI-NEXT: s_or_b32 vcc_hi, s15, s11
-; SI-NEXT: s_and_b32 s11, s58, 0xff
-; SI-NEXT: s_lshl_b32 s15, s85, 8
+; SI-NEXT: s_and_b32 s11, s30, 0xff
+; SI-NEXT: s_lshl_b32 s15, s6, 8
+; SI-NEXT: v_writelane_b32 v62, s96, 51
; SI-NEXT: s_or_b32 s57, s11, s15
-; SI-NEXT: s_and_b32 s11, s69, 0xff
+; SI-NEXT: s_and_b32 s11, s23, 0xff
+; SI-NEXT: v_writelane_b32 v62, s74, 52
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s74, 24
-; SI-NEXT: v_writelane_b32 v62, s74, 61
+; SI-NEXT: s_lshl_b32 s15, s18, 24
+; SI-NEXT: v_writelane_b32 v62, s22, 53
; SI-NEXT: s_or_b32 s74, s15, s11
-; SI-NEXT: s_and_b32 s11, s87, 0xff
-; SI-NEXT: s_lshl_b32 s15, s21, 8
+; SI-NEXT: s_and_b32 s11, s85, 0xff
+; SI-NEXT: s_lshl_b32 s15, s92, 8
+; SI-NEXT: v_writelane_b32 v62, s75, 54
; SI-NEXT: s_or_b32 s58, s11, s15
-; SI-NEXT: s_and_b32 s11, s68, 0xff
+; SI-NEXT: s_and_b32 s11, s76, 0xff
+; SI-NEXT: v_writelane_b32 v62, s30, 55
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s28, 24
+; SI-NEXT: s_lshl_b32 s15, s7, 24
+; SI-NEXT: v_readlane_b32 s4, v62, 16
; SI-NEXT: s_or_b32 s75, s15, s11
-; SI-NEXT: s_and_b32 s11, s24, 0xff
-; SI-NEXT: s_lshl_b32 s15, s55, 8
-; SI-NEXT: v_writelane_b32 v62, s25, 62
+; SI-NEXT: s_and_b32 s11, s4, 0xff
+; SI-NEXT: s_lshl_b32 s15, s34, 8
+; SI-NEXT: v_readlane_b32 s4, v62, 15
; SI-NEXT: s_or_b32 s59, s11, s15
-; SI-NEXT: s_and_b32 s11, s37, 0xff
+; SI-NEXT: s_and_b32 s11, s4, 0xff
+; SI-NEXT: s_mov_b32 s30, s85
+; SI-NEXT: s_mov_b32 s85, s82
+; SI-NEXT: s_mov_b32 s82, s7
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s51, 24
-; SI-NEXT: v_readlane_b32 s4, v62, 13
-; SI-NEXT: s_mov_b32 s18, s21
-; SI-NEXT: s_mov_b32 s21, s97
-; SI-NEXT: s_mov_b32 s97, s37
-; SI-NEXT: s_mov_b32 s37, s76
+; SI-NEXT: s_lshl_b32 s15, s31, 24
+; SI-NEXT: v_readlane_b32 s7, v62, 13
+; SI-NEXT: s_mov_b32 s4, s86
+; SI-NEXT: s_mov_b32 s86, s76
; SI-NEXT: s_or_b32 s76, s15, s11
-; SI-NEXT: s_and_b32 s11, s35, 0xff
-; SI-NEXT: s_lshl_b32 s15, s4, 8
+; SI-NEXT: s_and_b32 s11, s97, 0xff
+; SI-NEXT: s_lshl_b32 s15, s7, 8
; SI-NEXT: s_or_b32 s60, s11, s15
-; SI-NEXT: s_and_b32 s11, s77, 0xff
-; SI-NEXT: v_readlane_b32 s4, v62, 12
+; SI-NEXT: v_readlane_b32 s11, v62, 12
+; SI-NEXT: s_and_b32 s11, s11, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s4, 24
-; SI-NEXT: v_readlane_b32 s4, v62, 11
-; SI-NEXT: s_mov_b32 s6, s95
-; SI-NEXT: s_mov_b32 s95, s39
-; SI-NEXT: s_mov_b32 s39, s89
-; SI-NEXT: s_mov_b32 s89, s99
-; SI-NEXT: s_mov_b32 s99, s83
-; SI-NEXT: s_mov_b32 s83, s55
-; SI-NEXT: s_mov_b32 s55, s64
-; SI-NEXT: s_mov_b32 s64, s35
-; SI-NEXT: s_mov_b32 s35, s77
+; SI-NEXT: s_lshl_b32 s15, s78, 24
+; SI-NEXT: s_mov_b32 s27, s28
+; SI-NEXT: s_mov_b32 s28, s91
+; SI-NEXT: s_mov_b32 s91, s54
+; SI-NEXT: s_mov_b32 s54, s21
+; SI-NEXT: s_mov_b32 s21, s87
+; SI-NEXT: s_mov_b32 s87, s98
+; SI-NEXT: s_mov_b32 s98, s80
+; SI-NEXT: s_mov_b32 s80, s70
+; SI-NEXT: s_mov_b32 s70, s34
+; SI-NEXT: s_mov_b32 s34, s18
+; SI-NEXT: s_mov_b32 s18, s19
+; SI-NEXT: s_mov_b32 s19, s99
+; SI-NEXT: s_mov_b32 s99, s71
+; SI-NEXT: s_mov_b32 s71, s77
; SI-NEXT: s_or_b32 s77, s15, s11
-; SI-NEXT: s_and_b32 s11, s4, 0xff
-; SI-NEXT: v_readlane_b32 s4, v62, 10
-; SI-NEXT: s_lshl_b32 s15, s4, 8
-; SI-NEXT: v_readlane_b32 s4, v62, 9
+; SI-NEXT: s_and_b32 s11, s81, 0xff
+; SI-NEXT: s_lshl_b32 s15, s68, 8
; SI-NEXT: s_or_b32 s61, s11, s15
-; SI-NEXT: s_and_b32 s11, s4, 0xff
-; SI-NEXT: v_readlane_b32 s4, v62, 8
+; SI-NEXT: s_and_b32 s11, s53, 0xff
; SI-NEXT: s_lshl_b32 s11, s11, 16
-; SI-NEXT: s_lshl_b32 s15, s4, 24
+; SI-NEXT: s_lshl_b32 s15, s52, 24
+; SI-NEXT: s_mov_b32 s24, s38
+; SI-NEXT: s_mov_b32 s38, s89
+; SI-NEXT: s_mov_b32 s89, s97
+; SI-NEXT: s_mov_b32 s97, s6
+; SI-NEXT: s_mov_b32 s6, s83
+; SI-NEXT: s_mov_b32 s83, s81
+; SI-NEXT: s_mov_b32 s81, s68
+; SI-NEXT: s_mov_b32 s68, s65
+; SI-NEXT: s_mov_b32 s65, s52
+; SI-NEXT: s_mov_b32 s52, s53
+; SI-NEXT: s_mov_b32 s53, s31
+; SI-NEXT: s_mov_b32 s31, s78
; SI-NEXT: s_or_b32 s78, s15, s11
-; SI-NEXT: v_readlane_b32 s11, v62, 7
+; SI-NEXT: v_readlane_b32 s11, v62, 11
; SI-NEXT: s_and_b32 s11, s11, 0xff
; SI-NEXT: s_lshl_b32 s15, s17, 8
; SI-NEXT: s_or_b32 s11, s11, s15
; SI-NEXT: s_and_b32 s11, s11, 0xffff
; SI-NEXT: v_mov_b32_e32 v51, s9
-; SI-NEXT: s_or_b32 s17, s11, s9
-; SI-NEXT: v_readlane_b32 s9, v62, 2
-; SI-NEXT: v_readlane_b32 s11, v62, 1
+; SI-NEXT: s_or_b32 s48, s11, s9
+; SI-NEXT: v_readlane_b32 s9, v62, 4
+; SI-NEXT: v_readlane_b32 s11, v62, 3
; SI-NEXT: s_and_b32 s9, s9, 0xff
; SI-NEXT: s_lshl_b32 s15, s11, 8
; SI-NEXT: s_or_b32 s9, s9, s15
; SI-NEXT: s_and_b32 s9, s9, 0xffff
-; SI-NEXT: s_mov_b32 s4, s96
-; SI-NEXT: s_mov_b32 s96, s24
; SI-NEXT: v_mov_b32_e32 v52, s14
-; SI-NEXT: s_or_b32 s24, s9, s14
-; SI-NEXT: s_and_b32 s14, s93, 0xff
+; SI-NEXT: s_or_b32 s17, s9, s14
+; SI-NEXT: s_and_b32 s14, s36, 0xff
; SI-NEXT: s_lshl_b32 s15, s84, 8
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v21
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v53, v6, v1
+; SI-NEXT: v_or_b32_e32 v53, v60, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: v_or_b32_e32 v50, s14, v53
-; SI-NEXT: s_and_b32 s14, s8, 0xff
-; SI-NEXT: s_lshl_b32 s15, s49, 8
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v21
+; SI-NEXT: s_and_b32 s14, s42, 0xff
+; SI-NEXT: s_lshl_b32 s15, s25, 8
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v54, v14, v1
+; SI-NEXT: v_or_b32_e32 v54, v61, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: v_or_b32_e32 v17, s14, v54
-; SI-NEXT: s_and_b32 s14, s40, 0xff
-; SI-NEXT: s_lshl_b32 s15, s53, 8
+; SI-NEXT: s_and_b32 s14, s69, 0xff
+; SI-NEXT: s_lshl_b32 s15, s39, 8
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v22
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v55, v18, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: v_or_b32_e32 v16, s14, v55
-; SI-NEXT: s_and_b32 s14, s34, 0xff
-; SI-NEXT: s_lshl_b32 s15, s23, 8
+; SI-NEXT: s_and_b32 s14, s62, 0xff
+; SI-NEXT: s_lshl_b32 s15, s49, 8
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v13
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v20
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v40, v19, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: v_or_b32_e32 v15, s14, v40
-; SI-NEXT: s_and_b32 s14, s91, 0xff
-; SI-NEXT: s_lshl_b32 s15, s66, 8
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v43
+; SI-NEXT: s_and_b32 s14, s50, 0xff
+; SI-NEXT: s_lshl_b32 s15, s73, 8
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v46
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v41, v22, v1
+; SI-NEXT: v_or_b32_e32 v41, v6, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: v_or_b32_e32 v12, s14, v41
-; SI-NEXT: s_and_b32 s14, s50, 0xff
-; SI-NEXT: s_lshl_b32 s15, s54, 8
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v32
+; SI-NEXT: s_and_b32 s14, s93, 0xff
+; SI-NEXT: s_lshl_b32 s15, s40, 8
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v56
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v42, v23, v1
+; SI-NEXT: v_or_b32_e32 v42, v14, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
; SI-NEXT: v_or_b32_e32 v11, s14, v42
-; SI-NEXT: s_and_b32 s14, s73, 0xff
-; SI-NEXT: s_lshl_b32 s15, s36, 8
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v46
+; SI-NEXT: s_and_b32 s14, s66, 0xff
+; SI-NEXT: s_lshl_b32 s15, s37, 8
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v26
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v28, v59, v1
+; SI-NEXT: v_or_b32_e32 v59, v23, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: v_or_b32_e32 v10, s14, v28
-; SI-NEXT: s_and_b32 s14, s82, 0xff
-; SI-NEXT: s_lshl_b32 s15, s52, 8
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v26
+; SI-NEXT: v_or_b32_e32 v10, s14, v59
+; SI-NEXT: s_and_b32 s14, s94, 0xff
+; SI-NEXT: s_lshl_b32 s15, s35, 8
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v28
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_waitcnt expcnt(6)
+; SI-NEXT: v_mov_b32_e32 v13, v21
+; SI-NEXT: v_mov_b32_e32 v21, v60
; SI-NEXT: v_or_b32_e32 v60, v24, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
+; SI-NEXT: v_readlane_b32 s9, v62, 46
; SI-NEXT: v_or_b32_e32 v9, s14, v60
-; SI-NEXT: s_and_b32 s14, s90, 0xff
-; SI-NEXT: s_lshl_b32 s15, s16, 8
+; SI-NEXT: s_and_b32 s14, s9, 0xff
+; SI-NEXT: s_lshl_b32 s15, s79, 8
; SI-NEXT: v_and_b32_e32 v1, 0xff, v35
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_or_b32_e32 v31, v44, v1
+; SI-NEXT: v_or_b32_e32 v5, v43, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: v_or_b32_e32 v8, s14, v31
-; SI-NEXT: s_and_b32 s14, s62, 0xff
-; SI-NEXT: s_lshl_b32 s15, s65, 8
+; SI-NEXT: v_or_b32_e32 v8, s14, v5
+; SI-NEXT: s_and_b32 s14, s8, 0xff
+; SI-NEXT: s_lshl_b32 s15, s67, 8
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: s_or_b32 s14, s14, s15
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_or_b32_e32 v32, v44, v1
+; SI-NEXT: s_and_b32 s14, s14, 0xffff
+; SI-NEXT: v_or_b32_e32 v7, s14, v32
+; SI-NEXT: s_and_b32 s14, s90, 0xff
+; SI-NEXT: s_lshl_b32 s15, s20, 8
; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_mov_b32_e32 v30, v28
+; SI-NEXT: v_mov_b32_e32 v28, v24
+; SI-NEXT: v_mov_b32_e32 v24, v19
+; SI-NEXT: v_mov_b32_e32 v19, v61
; SI-NEXT: v_or_b32_e32 v61, v45, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: v_or_b32_e32 v7, s14, v61
-; SI-NEXT: s_and_b32 s14, s98, 0xff
-; SI-NEXT: s_lshl_b32 s15, s67, 8
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
+; SI-NEXT: v_readlane_b32 s7, v62, 17
+; SI-NEXT: v_or_b32_e32 v4, s14, v61
+; SI-NEXT: s_and_b32 s14, s7, 0xff
+; SI-NEXT: s_lshl_b32 s15, s55, 8
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v34
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_or_b32_e32 v6, v47, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: v_or_b32_e32 v4, s14, v6
-; SI-NEXT: s_and_b32 s14, s92, 0xff
-; SI-NEXT: s_lshl_b32 s15, s7, 8
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
-; SI-NEXT: s_or_b32 s14, s14, s15
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_readlane_b32 s8, v62, 55
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_mov_b32_e32 v22, v14
-; SI-NEXT: v_or_b32_e32 v14, v56, v1
-; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: s_and_b32 s8, s8, 0xffff
-; SI-NEXT: v_or_b32_e32 v2, s14, v14
-; SI-NEXT: s_and_b32 s14, s70, 0xff
-; SI-NEXT: s_lshl_b32 s15, s94, 8
+; SI-NEXT: v_readlane_b32 s9, v62, 14
+; SI-NEXT: v_or_b32_e32 v2, s14, v6
+; SI-NEXT: s_and_b32 s14, s64, 0xff
+; SI-NEXT: s_lshl_b32 s15, s9, 8
; SI-NEXT: v_and_b32_e32 v1, 0xff, v39
-; SI-NEXT: s_or_b32 s42, s8, s63
-; SI-NEXT: v_readlane_b32 s8, v62, 56
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_and_b32 s8, s8, 0xffff
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: v_mov_b32_e32 v32, v23
-; SI-NEXT: v_mov_b32_e32 v23, v18
-; SI-NEXT: v_or_b32_e32 v18, v57, v1
+; SI-NEXT: v_readlane_b32 s7, v62, 45
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: v_or_b32_e32 v14, v57, v1
; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: s_or_b32 s40, s8, s5
-; SI-NEXT: v_readlane_b32 s8, v62, 57
-; SI-NEXT: v_or_b32_e32 v1, s14, v18
+; SI-NEXT: s_and_b32 s8, s7, 0xffff
+; SI-NEXT: v_readlane_b32 s7, v62, 47
+; SI-NEXT: v_or_b32_e32 v1, s14, v14
; SI-NEXT: s_and_b32 s14, s88, 0xff
-; SI-NEXT: s_lshl_b32 s15, s79, 8
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v34
-; SI-NEXT: s_and_b32 s8, s8, 0xffff
-; SI-NEXT: v_readlane_b32 s9, v62, 60
+; SI-NEXT: s_lshl_b32 s15, s95, 8
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v36
+; SI-NEXT: s_or_b32 s42, s8, s63
+; SI-NEXT: s_and_b32 s8, s7, 0xffff
+; SI-NEXT: v_readlane_b32 s7, v62, 48
+; SI-NEXT: s_and_b32 s16, s16, 0xffff
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_or_b32 s15, s8, s41
-; SI-NEXT: v_readlane_b32 s8, v62, 58
-; SI-NEXT: s_and_b32 s16, s9, 0xffff
+; SI-NEXT: s_or_b32 s40, s8, s5
+; SI-NEXT: s_and_b32 s8, s7, 0xffff
+; SI-NEXT: v_readlane_b32 s7, v62, 49
+; SI-NEXT: s_or_b32 s35, s16, s56
+; SI-NEXT: s_and_b32 s16, s51, 0xffff
; SI-NEXT: v_mov_b32_e32 v27, v26
-; SI-NEXT: v_mov_b32_e32 v26, v24
-; SI-NEXT: v_mov_b32_e32 v24, v19
-; SI-NEXT: v_or_b32_e32 v19, v58, v3
+; SI-NEXT: v_mov_b32_e32 v26, v23
+; SI-NEXT: v_mov_b32_e32 v23, v18
+; SI-NEXT: v_or_b32_e32 v18, v58, v3
; SI-NEXT: s_and_b32 s14, s14, 0xffff
-; SI-NEXT: s_and_b32 s8, s8, 0xffff
-; SI-NEXT: s_or_b32 s36, s16, s56
-; SI-NEXT: s_and_b32 s16, s48, 0xffff
-; SI-NEXT: v_or_b32_e32 v3, s14, v19
+; SI-NEXT: s_or_b32 s15, s8, s41
+; SI-NEXT: s_and_b32 s8, s7, 0xffff
+; SI-NEXT: s_or_b32 s51, s16, vcc_lo
+; SI-NEXT: s_and_b32 s16, s72, 0xffff
+; SI-NEXT: v_or_b32_e32 v3, s14, v18
; SI-NEXT: s_or_b32 s14, s8, s43
; SI-NEXT: s_and_b32 s8, s13, 0xffff
-; SI-NEXT: s_or_b32 s53, s16, vcc_lo
-; SI-NEXT: s_and_b32 s16, s72, 0xffff
+; SI-NEXT: s_or_b32 s93, s16, vcc_hi
+; SI-NEXT: s_and_b32 s16, s57, 0xffff
; SI-NEXT: s_or_b32 s13, s8, s44
; SI-NEXT: s_and_b32 s8, s12, 0xffff
-; SI-NEXT: s_or_b32 s94, s16, vcc_hi
-; SI-NEXT: s_and_b32 s16, s57, 0xffff
-; SI-NEXT: s_or_b32 s12, s8, s45
-; SI-NEXT: s_and_b32 s8, s10, 0xffff
; SI-NEXT: s_or_b32 s49, s16, s74
; SI-NEXT: s_and_b32 s16, s58, 0xffff
-; SI-NEXT: s_or_b32 s10, s8, s46
-; SI-NEXT: v_readlane_b32 s8, v62, 59
-; SI-NEXT: s_or_b32 s48, s16, s75
+; SI-NEXT: s_or_b32 s12, s8, s45
+; SI-NEXT: s_and_b32 s8, s10, 0xffff
+; SI-NEXT: v_readlane_b32 s7, v62, 50
+; SI-NEXT: s_or_b32 s9, s16, s75
; SI-NEXT: s_and_b32 s16, s59, 0xffff
-; SI-NEXT: s_and_b32 s8, s8, 0xffff
+; SI-NEXT: s_mov_b32 s22, s23
+; SI-NEXT: s_or_b32 s10, s8, s46
+; SI-NEXT: s_and_b32 s8, s7, 0xffff
; SI-NEXT: s_or_b32 s11, s16, s76
; SI-NEXT: s_and_b32 s16, s60, 0xffff
; SI-NEXT: s_and_b32 s23, s61, 0xffff
-; SI-NEXT: s_mov_b32 s30, s87
-; SI-NEXT: s_mov_b32 s87, s85
; SI-NEXT: s_or_b32 s8, s8, s47
-; SI-NEXT: s_or_b32 s9, s16, s77
-; SI-NEXT: s_or_b32 s16, s23, s78
-; SI-NEXT: v_mov_b32_e32 v36, v35
-; SI-NEXT: v_mov_b32_e32 v30, v37
-; SI-NEXT: v_mov_b32_e32 v35, v45
-; SI-NEXT: v_mov_b32_e32 v20, v47
-; SI-NEXT: v_mov_b32_e32 v49, v56
+; SI-NEXT: s_or_b32 s16, s16, s77
+; SI-NEXT: s_or_b32 s94, s23, s78
+; SI-NEXT: v_mov_b32_e32 v31, v49
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v35, v44
+; SI-NEXT: v_mov_b32_e32 v38, v37
+; SI-NEXT: v_mov_b32_e32 v37, v45
+; SI-NEXT: v_mov_b32_e32 v33, v47
; SI-NEXT: v_mov_b32_e32 v48, v39
; SI-NEXT: v_mov_b32_e32 v39, v57
; SI-NEXT: v_mov_b32_e32 v25, v58
@@ -199151,24 +199822,24 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_alignbit_b32 v58, s40, v52, 16
; SI-NEXT: v_alignbit_b32 v56, s15, v53, 16
; SI-NEXT: v_alignbit_b32 v47, s14, v54, 16
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_alignbit_b32 v46, s13, v55, 16
; SI-NEXT: v_alignbit_b32 v45, s12, v40, 16
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v44, s10, v41, 16
; SI-NEXT: v_alignbit_b32 v43, s8, v42, 16
-; SI-NEXT: v_alignbit_b32 v42, s36, v28, 16
-; SI-NEXT: v_alignbit_b32 v41, s53, v60, 16
-; SI-NEXT: v_alignbit_b32 v40, s94, v31, 16
-; SI-NEXT: v_alignbit_b32 v55, s49, v61, 16
-; SI-NEXT: v_alignbit_b32 v54, s48, v6, 16
-; SI-NEXT: v_alignbit_b32 v53, s11, v14, 16
-; SI-NEXT: v_mov_b32_e32 v14, v22
-; SI-NEXT: v_alignbit_b32 v52, s9, v18, 16
-; SI-NEXT: v_mov_b32_e32 v18, v23
-; SI-NEXT: v_alignbit_b32 v51, s16, v19, 16
+; SI-NEXT: v_alignbit_b32 v42, s35, v59, 16
+; SI-NEXT: v_alignbit_b32 v41, s51, v60, 16
+; SI-NEXT: v_mov_b32_e32 v60, v21
+; SI-NEXT: v_alignbit_b32 v40, s93, v5, 16
+; SI-NEXT: v_alignbit_b32 v55, s49, v32, 16
+; SI-NEXT: v_alignbit_b32 v54, s9, v61, 16
+; SI-NEXT: v_mov_b32_e32 v61, v19
; SI-NEXT: v_mov_b32_e32 v19, v24
-; SI-NEXT: v_mov_b32_e32 v24, v26
+; SI-NEXT: v_mov_b32_e32 v24, v28
+; SI-NEXT: v_alignbit_b32 v53, s11, v6, 16
+; SI-NEXT: v_alignbit_b32 v52, s16, v14, 16
+; SI-NEXT: v_alignbit_b32 v51, s94, v18, 16
+; SI-NEXT: v_mov_b32_e32 v18, v23
+; SI-NEXT: v_mov_b32_e32 v23, v26
; SI-NEXT: s_lshr_b32 s73, s63, 16
; SI-NEXT: s_lshr_b32 s72, s5, 16
; SI-NEXT: s_lshr_b32 s63, s41, 16
@@ -199181,43 +199852,56 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_lshr_b32 s56, vcc_lo, 16
; SI-NEXT: s_lshr_b32 s47, vcc_hi, 16
; SI-NEXT: s_lshr_b32 s46, s74, 16
-; SI-NEXT: v_readlane_b32 s25, v62, 62
-; SI-NEXT: v_readlane_b32 s74, v62, 61
+; SI-NEXT: v_readlane_b32 s74, v62, 52
+; SI-NEXT: v_readlane_b32 s96, v62, 51
; SI-NEXT: s_lshr_b32 s45, s75, 16
+; SI-NEXT: s_mov_b32 s23, s22
+; SI-NEXT: s_mov_b32 s7, s82
+; SI-NEXT: s_mov_b32 s82, s85
+; SI-NEXT: s_mov_b32 s85, s30
+; SI-NEXT: v_readlane_b32 s30, v62, 55
+; SI-NEXT: v_readlane_b32 s75, v62, 54
+; SI-NEXT: v_readlane_b32 s22, v62, 53
; SI-NEXT: s_lshr_b32 s44, s76, 16
-; SI-NEXT: s_mov_b32 s76, s37
-; SI-NEXT: s_mov_b32 s37, s97
-; SI-NEXT: s_mov_b32 s97, s21
-; SI-NEXT: s_mov_b32 s21, s18
-; SI-NEXT: s_mov_b32 s18, s17
-; SI-NEXT: s_mov_b32 s85, s87
-; SI-NEXT: s_mov_b32 s87, s30
-; SI-NEXT: s_mov_b32 s17, s24
+; SI-NEXT: s_mov_b32 s76, s86
+; SI-NEXT: s_mov_b32 s86, s4
; SI-NEXT: s_lshr_b32 s43, s77, 16
-; SI-NEXT: s_mov_b32 s77, s35
-; SI-NEXT: s_mov_b32 s35, s64
-; SI-NEXT: s_mov_b32 s64, s55
-; SI-NEXT: s_mov_b32 s55, s83
-; SI-NEXT: s_mov_b32 s83, s99
-; SI-NEXT: s_mov_b32 s99, s89
-; SI-NEXT: s_mov_b32 s89, s39
-; SI-NEXT: s_mov_b32 s39, s95
-; SI-NEXT: s_mov_b32 s95, s6
+; SI-NEXT: s_mov_b32 s77, s71
+; SI-NEXT: s_mov_b32 s71, s99
+; SI-NEXT: s_mov_b32 s99, s19
+; SI-NEXT: s_mov_b32 s19, s18
+; SI-NEXT: s_mov_b32 s18, s34
+; SI-NEXT: s_mov_b32 s34, s70
+; SI-NEXT: s_mov_b32 s70, s80
+; SI-NEXT: s_mov_b32 s80, s98
+; SI-NEXT: s_mov_b32 s98, s87
+; SI-NEXT: s_mov_b32 s87, s21
+; SI-NEXT: s_mov_b32 s21, s54
+; SI-NEXT: s_mov_b32 s54, s91
+; SI-NEXT: s_mov_b32 s91, s28
+; SI-NEXT: s_mov_b32 s28, s27
; SI-NEXT: s_lshr_b32 s41, s78, 16
-; SI-NEXT: s_mov_b32 s24, s96
-; SI-NEXT: s_mov_b32 s96, s4
+; SI-NEXT: s_mov_b32 s78, s31
+; SI-NEXT: s_mov_b32 s31, s53
+; SI-NEXT: s_mov_b32 s53, s52
+; SI-NEXT: s_mov_b32 s52, s65
+; SI-NEXT: s_mov_b32 s65, s68
+; SI-NEXT: s_mov_b32 s68, s81
+; SI-NEXT: s_mov_b32 s81, s83
+; SI-NEXT: s_mov_b32 s83, s6
+; SI-NEXT: s_mov_b32 s6, s97
+; SI-NEXT: s_mov_b32 s97, s89
+; SI-NEXT: s_mov_b32 s89, s38
+; SI-NEXT: s_mov_b32 s38, s24
+; SI-NEXT: v_readlane_b32 s24, v62, 46
+; SI-NEXT: v_mov_b32_e32 v6, v22
+; SI-NEXT: v_mov_b32_e32 v14, v20
; SI-NEXT: s_cbranch_execnz .LBB97_3
; SI-NEXT: .LBB97_2: ; %cmp.true
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v36
-; SI-NEXT: v_and_b32_e32 v8, 0xff, v8
-; SI-NEXT: v_mov_b32_e32 v6, v5
-; SI-NEXT: v_mov_b32_e32 v5, v27
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: s_add_i32 s4, s88, 3
; SI-NEXT: s_and_b32 s4, s4, 0xff
-; SI-NEXT: s_lshl_b32 s5, s79, 8
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v34
+; SI-NEXT: s_lshl_b32 s5, s95, 8
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v36
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: s_addk_i32 s4, 0x300
@@ -199225,26 +199909,22 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v1, v25, v1
; SI-NEXT: v_or_b32_e32 v1, s4, v1
-; SI-NEXT: v_readlane_b32 s4, v62, 11
-; SI-NEXT: s_add_i32 s4, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 10
-; SI-NEXT: v_readlane_b32 s6, v62, 9
+; SI-NEXT: s_add_i32 s4, s81, 3
; SI-NEXT: s_and_b32 s4, s4, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: s_add_i32 s8, s6, 3
+; SI-NEXT: s_lshl_b32 s5, s68, 8
+; SI-NEXT: s_add_i32 s8, s53, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_readlane_b32 s5, v62, 8
; SI-NEXT: s_and_b32 s8, s8, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 24
+; SI-NEXT: s_lshl_b32 s5, s52, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_add_i32 s70, s70, 3
-; SI-NEXT: v_readlane_b32 s6, v62, 30
+; SI-NEXT: s_add_i32 s64, s64, 3
+; SI-NEXT: v_readlane_b32 s8, v62, 14
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_and_b32 s5, s70, 0xff
-; SI-NEXT: s_lshl_b32 s8, s6, 8
+; SI-NEXT: s_and_b32 s5, s64, 0xff
+; SI-NEXT: s_lshl_b32 s8, s8, 8
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v48
; SI-NEXT: s_or_b32 s5, s8, s5
; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
@@ -199252,301 +199932,306 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: s_and_b32 s5, s5, 0xffff
; SI-NEXT: v_or_b32_e32 v2, v39, v2
+; SI-NEXT: s_add_i32 s95, s97, 3
+; SI-NEXT: v_readlane_b32 s8, v62, 13
+; SI-NEXT: v_readlane_b32 s9, v62, 12
; SI-NEXT: v_or_b32_e32 v2, s5, v2
-; SI-NEXT: s_add_i32 s5, s35, 3
-; SI-NEXT: v_readlane_b32 s6, v62, 13
-; SI-NEXT: s_and_b32 s5, s5, 0xff
-; SI-NEXT: s_lshl_b32 s8, s6, 8
-; SI-NEXT: s_add_i32 s9, s77, 3
+; SI-NEXT: s_and_b32 s5, s95, 0xff
+; SI-NEXT: s_lshl_b32 s8, s8, 8
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_or_b32 s5, s8, s5
-; SI-NEXT: v_readlane_b32 s6, v62, 12
; SI-NEXT: s_and_b32 s9, s9, 0xff
-; SI-NEXT: s_lshl_b32 s8, s6, 24
+; SI-NEXT: s_lshl_b32 s8, s78, 24
; SI-NEXT: s_lshl_b32 s9, s9, 16
; SI-NEXT: s_addk_i32 s5, 0x300
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s5, s5, 0xffff
+; SI-NEXT: s_add_i32 s14, s4, 0x3000000
+; SI-NEXT: v_readlane_b32 s4, v62, 17
; SI-NEXT: s_or_b32 s5, s8, s5
-; SI-NEXT: s_add_i32 s79, s92, 3
+; SI-NEXT: s_add_i32 s4, s4, 3
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v1
-; SI-NEXT: s_add_i32 s16, s4, 0x3000000
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x3000000, v2
-; SI-NEXT: s_add_i32 s9, s5, 0x3000000
-; SI-NEXT: s_and_b32 s4, s79, 0xff
-; SI-NEXT: s_lshl_b32 s5, s7, 8
-; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33
+; SI-NEXT: s_add_i32 s16, s5, 0x3000000
+; SI-NEXT: s_and_b32 s4, s4, 0xff
+; SI-NEXT: s_lshl_b32 s5, s55, 8
+; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v34
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v2, v49, v2
+; SI-NEXT: v_or_b32_e32 v2, v33, v2
; SI-NEXT: v_or_b32_e32 v2, s4, v2
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v5
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: s_add_i32 s4, s24, 3
+; SI-NEXT: v_readlane_b32 s4, v62, 16
+; SI-NEXT: s_add_i32 s4, s4, 3
+; SI-NEXT: v_readlane_b32 s8, v62, 15
; SI-NEXT: s_and_b32 s4, s4, 0xff
-; SI-NEXT: s_lshl_b32 s5, s55, 8
-; SI-NEXT: s_add_i32 s8, s37, 3
+; SI-NEXT: s_lshl_b32 s5, s34, 8
+; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s8, s8, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s51, 24
+; SI-NEXT: s_lshl_b32 s5, s31, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s52, s98, 3
+; SI-NEXT: s_add_i32 s48, s90, 3
; SI-NEXT: s_add_i32 s11, s4, 0x3000000
-; SI-NEXT: s_and_b32 s4, s52, 0xff
-; SI-NEXT: s_lshl_b32 s5, s67, 8
+; SI-NEXT: s_and_b32 s4, s48, 0xff
+; SI-NEXT: s_lshl_b32 s5, s20, 8
; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v38
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v4, 0xff, v4
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v4, v20, v4
-; SI-NEXT: s_add_i32 s30, s87, 3
+; SI-NEXT: v_or_b32_e32 v4, v37, v4
+; SI-NEXT: s_add_i32 s90, s85, 3
; SI-NEXT: v_or_b32_e32 v4, s4, v4
-; SI-NEXT: s_and_b32 s4, s30, 0xff
-; SI-NEXT: s_lshl_b32 s5, s21, 8
-; SI-NEXT: s_add_i32 s8, s68, 3
+; SI-NEXT: s_and_b32 s4, s90, 0xff
+; SI-NEXT: s_lshl_b32 s5, s92, 8
+; SI-NEXT: s_add_i32 s8, s76, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s8, s8, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s28, 24
+; SI-NEXT: s_lshl_b32 s5, s7, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s48, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 42
-; SI-NEXT: v_mov_b32_e32 v22, v30
-; SI-NEXT: s_add_i32 s87, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 39
-; SI-NEXT: s_and_b32 s4, s87, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v22
+; SI-NEXT: s_add_i32 s9, s4, 0x3000000
+; SI-NEXT: v_readlane_b32 s4, v62, 30
+; SI-NEXT: s_add_i32 s85, s4, 3
+; SI-NEXT: s_and_b32 s4, s85, 0xff
+; SI-NEXT: s_lshl_b32 s5, s67, 8
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v31
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
+; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v7, v35, v7
-; SI-NEXT: v_or_b32_e32 v7, s4, v7
-; SI-NEXT: v_readlane_b32 s4, v62, 32
-; SI-NEXT: s_add_i32 s67, s4, 3
+; SI-NEXT: v_or_b32_e32 v5, v35, v5
+; SI-NEXT: v_or_b32_e32 v5, s4, v5
+; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v5
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT: s_add_i32 s67, s30, 3
; SI-NEXT: s_and_b32 s4, s67, 0xff
-; SI-NEXT: s_lshl_b32 s5, s85, 8
-; SI-NEXT: s_add_i32 s8, s69, 3
+; SI-NEXT: s_lshl_b32 s5, s6, 8
+; SI-NEXT: s_add_i32 s8, s23, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s8, s8, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s74, 24
+; SI-NEXT: s_lshl_b32 s5, s18, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s50, s90, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 23
+; SI-NEXT: s_add_i32 s50, s24, 3
; SI-NEXT: s_add_i32 s49, s4, 0x3000000
; SI-NEXT: s_and_b32 s4, s50, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
+; SI-NEXT: s_lshl_b32 s5, s79, 8
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_add_i32 s94, s86, 3
-; SI-NEXT: v_or_b32_e32 v8, s4, v8
-; SI-NEXT: s_and_b32 s4, s94, 0xff
-; SI-NEXT: s_lshl_b32 s5, s38, 8
+; SI-NEXT: s_add_i32 s93, s19, 3
+; SI-NEXT: s_lshl_b32 s5, s82, 8
; SI-NEXT: s_add_i32 s8, s71, 3
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v5
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s8, s8, 0xff
-; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s81, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
+; SI-NEXT: v_mov_b32_e32 v21, v30
+; SI-NEXT: v_readlane_b32 s6, v62, 28
+; SI-NEXT: s_add_i32 s82, s6, 3
+; SI-NEXT: v_mov_b32_e32 v20, v27
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: s_add_i32 s66, s21, 3
+; SI-NEXT: s_add_i32 s71, s28, 3
+; SI-NEXT: s_and_b32 s7, s71, 0xff
+; SI-NEXT: s_lshl_b32 s7, s7, 16
+; SI-NEXT: s_add_i32 s34, s98, 3
+; SI-NEXT: s_add_i32 s68, s69, 3
+; SI-NEXT: s_add_i32 s31, s38, 3
+; SI-NEXT: s_add_i32 s79, s36, 3
+; SI-NEXT: v_mov_b32_e32 v30, s14
+; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v13
+; SI-NEXT: v_and_b32_e32 v13, 0xff, v13
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
+; SI-NEXT: v_mov_b32_e32 v39, s16
+; SI-NEXT: v_add_i32_e32 v2, vcc, 0x3000000, v2
+; SI-NEXT: v_mov_b32_e32 v28, s11
+; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v4
+; SI-NEXT: v_mov_b32_e32 v27, s9
+; SI-NEXT: v_mov_b32_e32 v26, s49
+; SI-NEXT: v_alignbit_b32 v55, v26, v7, 16
+; SI-NEXT: v_alignbit_b32 v54, v27, v4, 16
+; SI-NEXT: v_alignbit_b32 v53, v28, v2, 16
+; SI-NEXT: v_alignbit_b32 v52, v39, v1, 16
+; SI-NEXT: v_alignbit_b32 v51, v30, v3, 16
+; SI-NEXT: s_lshr_b32 s46, s49, 16
+; SI-NEXT: s_lshr_b32 s45, s9, 16
+; SI-NEXT: s_lshr_b32 s44, s11, 16
+; SI-NEXT: s_lshr_b32 s43, s16, 16
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5
+; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_or_b32_e32 v5, v8, v5
+; SI-NEXT: v_or_b32_e32 v5, s4, v5
+; SI-NEXT: s_and_b32 s4, s93, 0xff
+; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_addk_i32 s4, 0x300
+; SI-NEXT: s_lshl_b32 s5, s80, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s94, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 52
-; SI-NEXT: s_add_i32 s18, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 51
-; SI-NEXT: s_and_b32 s4, s18, 0xff
+; SI-NEXT: s_add_i32 s93, s4, 0x3000000
+; SI-NEXT: v_readlane_b32 s4, v62, 41
+; SI-NEXT: s_add_i32 s20, s4, 3
+; SI-NEXT: v_readlane_b32 s5, v62, 39
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v5
+; SI-NEXT: s_and_b32 s4, s20, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v21
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
+; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v9, v24, v9
-; SI-NEXT: v_or_b32_e32 v9, s4, v9
-; SI-NEXT: v_readlane_b32 s4, v62, 45
-; SI-NEXT: s_add_i32 s98, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 43
-; SI-NEXT: v_readlane_b32 s6, v62, 14
-; SI-NEXT: s_and_b32 s4, s98, 0xff
+; SI-NEXT: v_or_b32_e32 v5, v24, v5
+; SI-NEXT: v_or_b32_e32 v5, s4, v5
+; SI-NEXT: v_readlane_b32 s4, v62, 34
+; SI-NEXT: s_add_i32 s97, s4, 3
+; SI-NEXT: v_readlane_b32 s5, v62, 31
+; SI-NEXT: s_and_b32 s4, s97, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: s_add_i32 s8, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_and_b32 s8, s8, 0xff
+; SI-NEXT: v_readlane_b32 s5, v62, 18
+; SI-NEXT: s_and_b32 s8, s82, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s31, 24
+; SI-NEXT: s_lshl_b32 s5, s5, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s53, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 41
-; SI-NEXT: s_add_i32 s86, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 38
-; SI-NEXT: s_and_b32 s4, s86, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
+; SI-NEXT: s_add_i32 s51, s4, 0x3000000
+; SI-NEXT: v_readlane_b32 s4, v62, 44
+; SI-NEXT: s_add_i32 s17, s4, 3
+; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v5
+; SI-NEXT: s_and_b32 s4, s17, 0xff
+; SI-NEXT: s_lshl_b32 s5, s37, 8
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v20
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v10
+; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v10, v59, v10
-; SI-NEXT: v_or_b32_e32 v10, s4, v10
-; SI-NEXT: v_readlane_b32 s4, v62, 31
-; SI-NEXT: s_add_i32 s66, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 27
+; SI-NEXT: v_or_b32_e32 v5, v23, v5
+; SI-NEXT: v_or_b32_e32 v5, s4, v5
+; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v5
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: s_and_b32 s4, s66, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: s_add_i32 s37, s39, 3
+; SI-NEXT: s_lshl_b32 s5, s87, 8
+; SI-NEXT: s_add_i32 s37, s75, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s8, s37, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s95, 24
+; SI-NEXT: s_lshl_b32 s5, s22, 24
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s36, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 50
+; SI-NEXT: s_add_i32 s35, s4, 0x3000000
+; SI-NEXT: v_readlane_b32 s4, v62, 40
; SI-NEXT: s_add_i32 s21, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 49
+; SI-NEXT: v_readlane_b32 s5, v62, 38
; SI-NEXT: s_and_b32 s4, s21, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_readlane_b32 s5, v62, 37
+; SI-NEXT: v_readlane_b32 s5, v62, 29
; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; SI-NEXT: s_add_i32 s71, s22, 3
-; SI-NEXT: s_and_b32 s8, s71, 0xff
-; SI-NEXT: s_lshl_b32 s8, s8, 16
-; SI-NEXT: s_add_i32 s35, s99, 3
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v5
+; SI-NEXT: s_add_i32 s80, s26, 3
+; SI-NEXT: v_mov_b32_e32 v25, s93
+; SI-NEXT: v_mov_b32_e32 v24, s51
+; SI-NEXT: v_mov_b32_e32 v23, s35
+; SI-NEXT: v_alignbit_b32 v42, v23, v10, 16
+; SI-NEXT: v_alignbit_b32 v41, v24, v9, 16
+; SI-NEXT: v_alignbit_b32 v40, v25, v8, 16
+; SI-NEXT: s_lshr_b32 s57, s35, 16
+; SI-NEXT: s_lshr_b32 s56, s51, 16
+; SI-NEXT: s_lshr_b32 s47, s93, 16
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5
+; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_or_b32_e32 v5, v11, v5
+; SI-NEXT: v_or_b32_e32 v5, s4, v5
+; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v5
; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v11, 0xff, v11
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
-; SI-NEXT: v_or_b32_e32 v11, v32, v11
-; SI-NEXT: v_or_b32_e32 v11, s4, v11
-; SI-NEXT: v_readlane_b32 s4, v62, 40
-; SI-NEXT: s_add_i32 s85, s4, 3
-; SI-NEXT: s_and_b32 s4, s85, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 32
+; SI-NEXT: s_add_i32 s87, s4, 3
+; SI-NEXT: s_and_b32 s4, s87, 0xff
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_readlane_b32 s5, v62, 33
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s5, 24
+; SI-NEXT: s_lshl_b32 s5, s54, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s5, s8
+; SI-NEXT: s_or_b32 s5, s5, s7
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s8, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 54
-; SI-NEXT: s_add_i32 s17, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 53
-; SI-NEXT: s_and_b32 s4, s17, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 43
+; SI-NEXT: s_add_i32 s19, s4, 3
+; SI-NEXT: v_readlane_b32 s5, v62, 42
+; SI-NEXT: s_and_b32 s4, s19, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_readlane_b32 s5, v62, 47
+; SI-NEXT: v_readlane_b32 s5, v62, 36
; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: s_and_b32 s6, s35, 0xff
-; SI-NEXT: s_lshl_b32 s6, s6, 16
-; SI-NEXT: v_mov_b32_e32 v30, s16
-; SI-NEXT: v_mov_b32_e32 v39, s9
-; SI-NEXT: v_add_i32_e32 v2, vcc, 0x3000000, v2
-; SI-NEXT: v_mov_b32_e32 v28, s11
-; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v4
-; SI-NEXT: v_mov_b32_e32 v27, s48
-; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v7
-; SI-NEXT: v_mov_b32_e32 v26, s49
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v8
-; SI-NEXT: v_mov_b32_e32 v25, s94
-; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v9
-; SI-NEXT: v_mov_b32_e32 v24, s53
-; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v10
-; SI-NEXT: v_mov_b32_e32 v23, s36
-; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v11
; SI-NEXT: v_mov_b32_e32 v22, s8
; SI-NEXT: v_alignbit_b32 v43, v22, v11, 16
-; SI-NEXT: v_alignbit_b32 v42, v23, v10, 16
-; SI-NEXT: v_alignbit_b32 v41, v24, v9, 16
-; SI-NEXT: v_alignbit_b32 v40, v25, v8, 16
-; SI-NEXT: v_alignbit_b32 v55, v26, v7, 16
-; SI-NEXT: v_alignbit_b32 v54, v27, v4, 16
-; SI-NEXT: v_alignbit_b32 v53, v28, v2, 16
-; SI-NEXT: v_alignbit_b32 v52, v39, v1, 16
-; SI-NEXT: v_alignbit_b32 v51, v30, v3, 16
; SI-NEXT: s_lshr_b32 s58, s8, 16
-; SI-NEXT: s_lshr_b32 s57, s36, 16
-; SI-NEXT: s_lshr_b32 s56, s53, 16
-; SI-NEXT: s_lshr_b32 s47, s94, 16
-; SI-NEXT: s_lshr_b32 s46, s49, 16
-; SI-NEXT: s_lshr_b32 s45, s48, 16
-; SI-NEXT: s_lshr_b32 s44, s11, 16
-; SI-NEXT: s_lshr_b32 s43, s9, 16
-; SI-NEXT: s_lshr_b32 s41, s16, 16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v5
; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v12, v5
; SI-NEXT: v_or_b32_e32 v5, s4, v5
-; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v5
-; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v13
-; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v6
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: v_readlane_b32 s4, v62, 48
-; SI-NEXT: s_add_i32 s7, s4, 3
-; SI-NEXT: s_and_b32 s4, s7, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 37
+; SI-NEXT: s_add_i32 s6, s4, 3
+; SI-NEXT: s_and_b32 s4, s6, 0xff
; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: v_readlane_b32 s5, v62, 24
+; SI-NEXT: s_and_b32 s6, s34, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s89, 24
+; SI-NEXT: s_lshl_b32 s5, s5, 24
+; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s10, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 46
-; SI-NEXT: s_add_i32 s99, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 44
-; SI-NEXT: s_and_b32 s4, s99, 0xff
+; SI-NEXT: v_readlane_b32 s4, v62, 35
+; SI-NEXT: s_add_i32 s98, s4, 3
+; SI-NEXT: v_readlane_b32 s5, v62, 33
+; SI-NEXT: v_add_i32_e32 v12, vcc, 0x3000000, v5
+; SI-NEXT: s_and_b32 s4, s98, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v14
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v5, v19, v5
+; SI-NEXT: v_readlane_b32 s6, v62, 27
; SI-NEXT: v_or_b32_e32 v5, s4, v5
-; SI-NEXT: v_readlane_b32 s4, v62, 36
-; SI-NEXT: s_add_i32 s81, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 35
-; SI-NEXT: v_readlane_b32 s6, v62, 28
-; SI-NEXT: s_and_b32 s4, s81, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
+; SI-NEXT: s_and_b32 s4, s80, 0xff
+; SI-NEXT: s_lshl_b32 s5, s91, 8
; SI-NEXT: s_add_i32 s55, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_readlane_b32 s5, v62, 26
@@ -199557,101 +200242,91 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s12, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 34
-; SI-NEXT: s_add_i32 s69, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 29
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v5
-; SI-NEXT: s_and_b32 s4, s69, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v29
+; SI-NEXT: s_add_i32 s12, s4, 0x3000000
+; SI-NEXT: s_and_b32 s4, s68, 0xff
+; SI-NEXT: s_lshl_b32 s5, s39, 8
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v6
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v5, v18, v5
+; SI-NEXT: v_readlane_b32 s6, v62, 22
; SI-NEXT: v_or_b32_e32 v5, s4, v5
-; SI-NEXT: v_readlane_b32 s4, v62, 22
-; SI-NEXT: s_add_i32 s34, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 21
-; SI-NEXT: v_readlane_b32 s6, v62, 19
-; SI-NEXT: s_and_b32 s4, s34, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
+; SI-NEXT: s_and_b32 s4, s31, 0xff
+; SI-NEXT: s_lshl_b32 s5, s89, 8
; SI-NEXT: s_add_i32 s92, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s6, s92, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s97, 24
+; SI-NEXT: s_lshl_b32 s5, s96, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s13, s4, 0x3000000
; SI-NEXT: v_readlane_b32 s4, v62, 25
-; SI-NEXT: s_add_i32 s51, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 24
+; SI-NEXT: s_add_i32 s52, s4, 3
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x3000000, v5
-; SI-NEXT: s_and_b32 s4, s51, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v21
+; SI-NEXT: s_and_b32 s4, s52, 0xff
+; SI-NEXT: s_lshl_b32 s5, s25, 8
+; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v29
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v5, v14, v5
+; SI-NEXT: v_or_b32_e32 v5, v61, v5
; SI-NEXT: v_or_b32_e32 v5, s4, v5
-; SI-NEXT: v_readlane_b32 s4, v62, 20
-; SI-NEXT: s_add_i32 s95, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 18
-; SI-NEXT: s_and_b32 s4, s95, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: s_add_i32 s6, s96, 3
+; SI-NEXT: v_readlane_b32 s4, v62, 23
+; SI-NEXT: s_add_i32 s94, s4, 3
+; SI-NEXT: s_and_b32 s4, s94, 0xff
+; SI-NEXT: s_lshl_b32 s5, s99, 8
+; SI-NEXT: s_add_i32 s6, s86, 3
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s6, s6, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s80, 24
+; SI-NEXT: s_lshl_b32 s5, s70, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_mov_b32 s94, s14
; SI-NEXT: s_add_i32 s14, s4, 0x3000000
-; SI-NEXT: s_add_i32 s4, s93, 3
-; SI-NEXT: s_and_b32 s4, s4, 0xff
+; SI-NEXT: s_and_b32 s4, s79, 0xff
; SI-NEXT: s_lshl_b32 s5, s84, 8
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_and_b32_e32 v13, 0xff, v13
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v6, v6, v13
+; SI-NEXT: v_or_b32_e32 v6, v60, v13
; SI-NEXT: v_or_b32_e32 v6, s4, v6
; SI-NEXT: s_add_i32 s4, s83, 3
; SI-NEXT: s_and_b32 s4, s4, 0xff
-; SI-NEXT: s_lshl_b32 s5, s25, 8
-; SI-NEXT: s_add_i32 s6, s64, 3
+; SI-NEXT: s_lshl_b32 s5, s77, 8
+; SI-NEXT: s_add_i32 s6, s74, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_readlane_b32 s5, v62, 15
; SI-NEXT: s_and_b32 s6, s6, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s5, 24
+; SI-NEXT: s_lshl_b32 s5, s65, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s15, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 2
+; SI-NEXT: v_readlane_b32 s4, v62, 4
; SI-NEXT: s_add_i32 s4, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 1
+; SI-NEXT: v_readlane_b32 s5, v62, 3
+; SI-NEXT: v_readlane_b32 s6, v62, 2
; SI-NEXT: s_and_b32 s4, s4, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
-; SI-NEXT: s_add_i32 s6, s26, 3
+; SI-NEXT: s_add_i32 s6, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: v_readlane_b32 s5, v62, 1
; SI-NEXT: s_and_b32 s6, s6, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s27, 24
+; SI-NEXT: s_lshl_b32 s5, s5, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
@@ -199659,11 +200334,12 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_add_i32 s17, s4, 0x3000000
; SI-NEXT: v_readlane_b32 s4, v62, 0
; SI-NEXT: s_add_i32 s4, s4, 3
+; SI-NEXT: v_readlane_b32 s6, v62, 20
; SI-NEXT: s_and_b32 s4, s4, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
-; SI-NEXT: s_add_i32 s6, s76, 3
+; SI-NEXT: s_add_i32 s6, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_readlane_b32 s5, v62, 16
+; SI-NEXT: v_readlane_b32 s5, v62, 19
; SI-NEXT: s_and_b32 s6, s6, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_lshl_b32 s5, s5, 24
@@ -199672,30 +200348,32 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s40, s4, 0x3000000
-; SI-NEXT: v_readlane_b32 s4, v62, 7
+; SI-NEXT: v_readlane_b32 s4, v62, 11
; SI-NEXT: s_add_i32 s4, s4, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 17
-; SI-NEXT: v_readlane_b32 s6, v62, 6
+; SI-NEXT: v_readlane_b32 s5, v62, 21
+; SI-NEXT: v_readlane_b32 s6, v62, 10
; SI-NEXT: s_and_b32 s4, s4, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
; SI-NEXT: s_add_i32 s6, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: v_readlane_b32 s5, v62, 9
; SI-NEXT: s_and_b32 s6, s6, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s19, 24
+; SI-NEXT: s_lshl_b32 s5, s5, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_add_i32 s18, s4, 0x3000000
-; SI-NEXT: s_add_i32 s4, s20, 3
-; SI-NEXT: v_readlane_b32 s5, v62, 5
-; SI-NEXT: v_readlane_b32 s6, v62, 4
+; SI-NEXT: s_add_i32 s48, s4, 0x3000000
+; SI-NEXT: v_readlane_b32 s4, v62, 8
+; SI-NEXT: s_add_i32 s4, s4, 3
+; SI-NEXT: v_readlane_b32 s5, v62, 7
+; SI-NEXT: v_readlane_b32 s6, v62, 6
; SI-NEXT: s_and_b32 s4, s4, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 8
; SI-NEXT: s_add_i32 s6, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: v_readlane_b32 s5, v62, 3
+; SI-NEXT: v_readlane_b32 s5, v62, 5
; SI-NEXT: s_and_b32 s6, s6, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_lshl_b32 s5, s5, 24
@@ -199704,7 +200382,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s42, s4, 0x3000000
-; SI-NEXT: v_mov_b32_e32 v13, s18
+; SI-NEXT: v_mov_b32_e32 v13, s48
; SI-NEXT: v_mov_b32_e32 v20, s10
; SI-NEXT: v_mov_b32_e32 v19, s12
; SI-NEXT: v_mov_b32_e32 v18, s13
@@ -199717,10 +200395,8 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_alignbit_b32 v58, s40, v13, 16
; SI-NEXT: v_alignbit_b32 v56, v6, v50, 16
; SI-NEXT: v_alignbit_b32 v47, v5, v17, 16
-; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_alignbit_b32 v46, v18, v16, 16
; SI-NEXT: v_alignbit_b32 v45, v19, v15, 16
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v44, v20, v12, 16
; SI-NEXT: s_lshr_b32 s73, s42, 16
; SI-NEXT: s_lshr_b32 s72, s40, 16
@@ -199729,8 +200405,9 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: s_lshr_b32 s61, s13, 16
; SI-NEXT: s_lshr_b32 s60, s12, 16
; SI-NEXT: s_lshr_b32 s59, s10, 16
+; SI-NEXT: s_lshr_b32 s41, s94, 16
; SI-NEXT: .LBB97_3: ; %end
-; SI-NEXT: s_and_b32 s4, s18, 0xffff
+; SI-NEXT: s_and_b32 s4, s48, 0xffff
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v57
; SI-NEXT: v_or_b32_e32 v5, s4, v5
; SI-NEXT: s_and_b32 s4, s42, 0xffff
@@ -199739,6 +200416,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_store_dword v5, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_i32_e32 v5, vcc, 4, v0
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_mov_b32_e32 v6, s4
; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen
; SI-NEXT: s_and_b32 s4, s17, 0xffff
@@ -199835,7 +200513,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v5, 0xffff, v10
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v42
-; SI-NEXT: s_and_b32 s4, s36, 0xffff
+; SI-NEXT: s_and_b32 s4, s35, 0xffff
; SI-NEXT: s_lshl_b32 s5, s57, 16
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_add_i32_e32 v6, vcc, 64, v0
@@ -199848,7 +200526,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v5, 0xffff, v9
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v41
-; SI-NEXT: s_and_b32 s4, s53, 0xffff
+; SI-NEXT: s_and_b32 s4, s51, 0xffff
; SI-NEXT: s_lshl_b32 s5, s56, 16
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x48, v0
@@ -199861,7 +200539,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v5, 0xffff, v8
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v40
-; SI-NEXT: s_and_b32 s4, s94, 0xffff
+; SI-NEXT: s_and_b32 s4, s93, 0xffff
; SI-NEXT: s_lshl_b32 s5, s47, 16
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x50, v0
@@ -199886,7 +200564,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_store_dword v6, v5, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v54
-; SI-NEXT: s_and_b32 s4, s48, 0xffff
+; SI-NEXT: s_and_b32 s4, s9, 0xffff
; SI-NEXT: s_lshl_b32 s5, s45, 16
; SI-NEXT: v_or_b32_e32 v4, v4, v5
; SI-NEXT: v_add_i32_e32 v5, vcc, 0x60, v0
@@ -199910,7 +200588,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: buffer_store_dword v4, v2, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v52
-; SI-NEXT: s_and_b32 s4, s9, 0xffff
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s43, 16
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0
@@ -199923,7 +200601,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v51
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_and_b32 s4, s94, 0xffff
; SI-NEXT: s_lshl_b32 s5, s41, 16
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0
@@ -199984,27 +200662,30 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: v_readlane_b32 s31, v63, 1
; SI-NEXT: v_readlane_b32 s30, v63, 0
; SI-NEXT: s_or_saveexec_b64 s[4:5], -1
-; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; SI-NEXT: s_mov_b64 exec, s[4:5]
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB97_4:
-; SI-NEXT: v_mov_b32_e32 v5, v13
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v25, v58
; SI-NEXT: v_mov_b32_e32 v48, v39
; SI-NEXT: v_mov_b32_e32 v39, v57
-; SI-NEXT: v_mov_b32_e32 v49, v56
-; SI-NEXT: v_mov_b32_e32 v20, v47
-; SI-NEXT: v_mov_b32_e32 v30, v37
-; SI-NEXT: v_mov_b32_e32 v36, v35
-; SI-NEXT: v_mov_b32_e32 v35, v45
+; SI-NEXT: v_mov_b32_e32 v33, v47
+; SI-NEXT: v_mov_b32_e32 v38, v37
+; SI-NEXT: v_mov_b32_e32 v37, v45
+; SI-NEXT: v_mov_b32_e32 v31, v49
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v35, v44
+; SI-NEXT: v_mov_b32_e32 v30, v28
; SI-NEXT: v_mov_b32_e32 v27, v26
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: v_mov_b32_e32 v32, v23
-; SI-NEXT: ; implicit-def: $sgpr18
+; SI-NEXT: v_mov_b32_e32 v13, v21
+; SI-NEXT: ; implicit-def: $sgpr48
; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $sgpr42
; SI-NEXT: ; implicit-def: $sgpr73
@@ -200038,15 +200719,15 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr58
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr42
-; SI-NEXT: ; implicit-def: $sgpr36
+; SI-NEXT: ; implicit-def: $sgpr35
; SI-NEXT: ; implicit-def: $sgpr57
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr41
-; SI-NEXT: ; implicit-def: $sgpr53
+; SI-NEXT: ; implicit-def: $sgpr51
; SI-NEXT: ; implicit-def: $sgpr56
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr40
-; SI-NEXT: ; implicit-def: $sgpr94
+; SI-NEXT: ; implicit-def: $sgpr93
; SI-NEXT: ; implicit-def: $sgpr47
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr55
@@ -200054,7 +200735,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr46
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr54
-; SI-NEXT: ; implicit-def: $sgpr48
+; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr45
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr53
@@ -200062,13 +200743,15 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr44
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr52
-; SI-NEXT: ; implicit-def: $sgpr9
+; SI-NEXT: ; implicit-def: $sgpr16
; SI-NEXT: ; implicit-def: $sgpr43
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr51
-; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr94
; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: s_branch .LBB97_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB97_2
+; SI-NEXT: s_branch .LBB97_3
;
; VI-LABEL: bitcast_v128i8_to_v64i16_scalar:
; VI: ; %bb.0:
@@ -200089,22 +200772,22 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:332
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v4, off, s[0:3], s32 offset:8
@@ -200129,14 +200812,17 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
+; VI-NEXT: v_mov_b32_e32 v48, v27
+; VI-NEXT: v_mov_b32_e32 v39, v29
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v7
+; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v13
@@ -200144,50 +200830,51 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v19
; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v23
-; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v27
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v29
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v23
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v25
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v48
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v39
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v8
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v10
; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v12
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v26
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v28
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v30
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v31
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v32
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v33
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v34
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v35
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v36
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v37
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v38
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:184
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:200
@@ -200196,9 +200883,9 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; VI-NEXT: buffer_load_ushort v6, off, s[0:3], s32 offset:232
; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; VI-NEXT: v_lshlrev_b32_e32 v45, 8, v22
-; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v24
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_lshlrev_b32_e32 v8, 8, v22
+; VI-NEXT: v_lshlrev_b32_e32 v10, 8, v24
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: v_lshlrev_b32_e32 v16, 8, v16
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v18
@@ -200207,23 +200894,25 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v0
; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v5
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v6
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(5)
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:248
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:264
@@ -200236,128 +200925,123 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v0
; VI-NEXT: s_waitcnt vmcnt(6)
; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v1
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(6)
-; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v4
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(6)
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v3
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:312
; VI-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; VI-NEXT: buffer_load_ushort v2, off, s[0:3], s32 offset:328
-; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4
-; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:12
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
-; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:28
-; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:36
-; VI-NEXT: s_waitcnt vmcnt(11)
-; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v6
-; VI-NEXT: v_lshlrev_b32_e32 v28, 8, v3
-; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5
+; VI-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:324
; VI-NEXT: s_waitcnt vmcnt(10)
-; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
+; VI-NEXT: v_lshlrev_b32_e32 v4, 8, v4
+; VI-NEXT: s_waitcnt vmcnt(9)
+; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v5
+; VI-NEXT: s_waitcnt vmcnt(8)
+; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v6
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v6, 8, v0
-; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:44
-; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:52
-; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:60
-; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:68
-; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:84
-; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:92
-; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:100
-; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:116
-; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:124
-; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:132
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:140
-; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:148
-; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:156
-; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:164
-; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:172
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:180
-; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:188
-; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:196
-; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:204
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:212
-; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:220
-; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:228
-; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:236
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:244
-; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:252
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:260
-; VI-NEXT: s_waitcnt vmcnt(14)
+; VI-NEXT: v_lshlrev_b32_e32 v33, 8, v7
+; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; VI-NEXT: v_lshlrev_b32_e32 v2, 8, v2
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:268
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:276
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:284
-; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:292
-; VI-NEXT: buffer_load_ushort v0, off, s[0:3], s32 offset:300
-; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:308
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:316
-; VI-NEXT: buffer_load_ushort v10, off, s[0:3], s32 offset:324
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:316
+; VI-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:308
+; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:300
+; VI-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:292
+; VI-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:284
+; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:276
+; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:268
+; VI-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:260
+; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:252
+; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:244
+; VI-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:236
+; VI-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:228
+; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:220
+; VI-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:212
+; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:204
+; VI-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:196
+; VI-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:188
+; VI-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:180
+; VI-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:172
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:164
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:156
+; VI-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:148
+; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:140
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:132
+; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:124
+; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:116
+; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:108
+; VI-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:100
+; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:92
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:84
+; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:76
+; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:68
+; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:52
+; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:44
+; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:36
+; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:28
+; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:4
+; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v0
+; VI-NEXT: v_lshlrev_b32_e32 v0, 8, v2
+; VI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; VI-NEXT: s_waitcnt vmcnt(13)
+; VI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
; VI-NEXT: s_cbranch_scc0 .LBB97_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -200368,222 +201052,228 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; VI-NEXT: v_mov_b32_e32 v33, v6
+; VI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(5)
; VI-NEXT: v_or_b32_sdwa v2, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v0, v0, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(2)
; VI-NEXT: v_or_b32_sdwa v3, v3, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v3, v8
+; VI-NEXT: v_mov_b32_e32 v2, v8
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v1, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v3, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v2, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v1, v12, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v34, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v36, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v37, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v38, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v39, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v39, v22
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v38, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v39, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v48, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v49, v20 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v49, v26
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v48, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v45, v62
+; VI-NEXT: v_or_b32_sdwa v0, v50, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v16, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_or_b32_sdwa v3, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v2, v52, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v3, v53, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v17, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v32, v1
-; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v54, v22
-; VI-NEXT: v_mov_b32_e32 v41, v24
+; VI-NEXT: v_or_b32_sdwa v0, v54, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v18, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v34, v0
-; VI-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v45, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v37, v1
-; VI-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v55, v26
+; VI-NEXT: v_mov_b32_e32 v51, v1
+; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v53, v0
+; VI-NEXT: v_or_b32_sdwa v0, v47, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v49, v1
-; VI-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v43, v27
+; VI-NEXT: v_mov_b32_e32 v54, v1
+; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v47, v32
; VI-NEXT: v_or_b32_sdwa v20, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v55, v0
+; VI-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v35, v1
-; VI-NEXT: v_or_b32_sdwa v1, v46, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v53, v28
+; VI-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v47, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v33, v0
-; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v57, v24 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v25, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_mov_b32_e32 v38, v0
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v63, v33
+; VI-NEXT: v_or_b32_sdwa v22, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_mov_b32_e32 v48, v0
+; VI-NEXT: v_or_b32_sdwa v0, v27, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v50, v1
+; VI-NEXT: v_or_b32_sdwa v1, v23, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v56, v0
-; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v36, v0
+; VI-NEXT: v_or_b32_sdwa v0, v28, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v58, v1
-; VI-NEXT: v_or_b32_sdwa v1, v60, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v61, v60
-; VI-NEXT: v_mov_b32_e32 v60, v59
+; VI-NEXT: v_mov_b32_e32 v60, v1
+; VI-NEXT: v_or_b32_sdwa v1, v24, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v24, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v38, v0
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v61, v0
+; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v48, v1
-; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v1
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v62, v43
; VI-NEXT: v_or_b32_sdwa v25, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v1, v44, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v44, v42
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v37, v0
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v58, v57
+; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v50, v0
-; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v26, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v1, v62, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v52, v0
-; VI-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v0, v29, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_mov_b32_e32 v46, v1
-; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v32, v30
+; VI-NEXT: v_mov_b32_e32 v57, v40
+; VI-NEXT: v_or_b32_sdwa v27, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_mov_b32_e32 v45, v0
+; VI-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_mov_b32_e32 v56, v1
+; VI-NEXT: v_or_b32_sdwa v1, v41, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v42, v41
+; VI-NEXT: v_mov_b32_e32 v41, v29
; VI-NEXT: v_or_b32_sdwa v28, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v63, v0
-; VI-NEXT: v_or_b32_sdwa v0, v30, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v29, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v47, v1
-; VI-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v59, v0
+; VI-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v29, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v43, v31
; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_mov_b32_e32 v57, v1
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v33, v0
+; VI-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v30, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v31, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -200615,12 +201305,14 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_cbranch_execnz .LBB97_3
; VI-NEXT: .LBB97_2: ; %cmp.true
-; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v59
-; VI-NEXT: v_or_b32_sdwa v29, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v40
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
+; VI-NEXT: v_or_b32_sdwa v28, v56, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v27, v45, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v47
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
@@ -200639,302 +201331,309 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_lshl_b32 s9, s19, 8
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_lshl_b32 s10, s17, 8
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(4)
+; VI-NEXT: v_or_b32_sdwa v31, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: v_or_b32_sdwa v30, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v41
+; VI-NEXT: v_or_b32_sdwa v29, v63, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v63, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v58
+; VI-NEXT: v_or_b32_sdwa v24, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43
+; VI-NEXT: v_or_b32_sdwa v1, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v62
+; VI-NEXT: v_or_b32_sdwa v2, v59, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v26, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v62
-; VI-NEXT: v_or_b32_sdwa v28, v43, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v44
-; VI-NEXT: v_or_b32_sdwa v53, v52, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v45
-; VI-NEXT: v_or_b32_sdwa v27, v55, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
-; VI-NEXT: v_or_b32_sdwa v52, v50, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40
-; VI-NEXT: v_or_b32_sdwa v25, v48, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v60
-; VI-NEXT: v_or_b32_sdwa v59, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v61
-; VI-NEXT: v_or_b32_sdwa v24, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v26, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v48, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48
-; VI-NEXT: v_or_b32_sdwa v24, v24, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24
+; VI-NEXT: v_or_b32_sdwa v32, v37, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v32
+; VI-NEXT: v_or_b32_sdwa v26, v26, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v23, v41, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v25, v34, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v38, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38
-; VI-NEXT: v_or_b32_sdwa v23, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23
+; VI-NEXT: v_or_b32_sdwa v33, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v33
+; VI-NEXT: v_or_b32_sdwa v25, v25, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v22, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v60, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v50, v33, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v50
-; VI-NEXT: v_or_b32_sdwa v22, v22, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22
+; VI-NEXT: v_or_b32_sdwa v34, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v23, v50, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v54, v51, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v37, v48, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37
+; VI-NEXT: v_or_b32_sdwa v23, v23, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v23, vcc, 0x3000000, v23
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v20, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v22, v39, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v49, v39, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v49, v38, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v49, vcc, 0x300, v49
-; VI-NEXT: v_or_b32_sdwa v20, v20, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20
-; VI-NEXT: s_waitcnt vmcnt(3)
+; VI-NEXT: v_or_b32_sdwa v22, v22, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v22, vcc, 0x3000000, v22
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v21, v35, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v19, v37, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v52, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v37, v34, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v20, v54, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v37, vcc, 0x300, v37
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v31, v2, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v19, v19, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: v_or_b32_sdwa v18, v32, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v54, v53, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(4)
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: s_waitcnt vmcnt(2)
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v57, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v19, v51, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v57
-; VI-NEXT: v_or_b32_sdwa v18, v18, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51
+; VI-NEXT: v_or_b32_sdwa v19, v19, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x3000000, v19
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v18, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v48, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v48, vcc, 0x300, v48
+; VI-NEXT: v_or_b32_sdwa v18, v18, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x3000000, v18
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v16, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v10, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v56, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v17, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v11, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v34, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v15, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v34
-; VI-NEXT: v_or_b32_sdwa v14, v14, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v35, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v35
+; VI-NEXT: v_or_b32_sdwa v15, v15, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v14, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v36
-; VI-NEXT: v_or_b32_sdwa v13, v13, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v26
-; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v52
-; VI-NEXT: v_or_b32_sdwa v26, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54
-; VI-NEXT: v_or_b32_sdwa v21, v21, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21
-; VI-NEXT: v_add_u32_e32 v26, vcc, 0x3000000, v26
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v38, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v38
+; VI-NEXT: v_or_b32_sdwa v14, v14, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v38, vcc, 0x300, v2
+; VI-NEXT: v_or_b32_sdwa v29, v29, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v14
+; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v51, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v13, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v51
-; VI-NEXT: v_or_b32_sdwa v12, v12, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v51, vcc, 0x300, v59
-; VI-NEXT: v_or_b32_sdwa v25, v25, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v39
+; VI-NEXT: v_or_b32_sdwa v13, v13, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v27
+; VI-NEXT: v_add_u32_e32 v27, vcc, 0x300, v24
+; VI-NEXT: v_or_b32_sdwa v24, v60, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v27, v63, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v28, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
+; VI-NEXT: v_add_u32_e32 v24, vcc, 0x3000000, v24
+; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27
+; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v12, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v53, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v53, vcc, 0x300, v53
+; VI-NEXT: v_or_b32_sdwa v12, v12, v53 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12
-; VI-NEXT: v_add_u32_e32 v25, vcc, 0x3000000, v25
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v33, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v36, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v40, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v50, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
-; VI-NEXT: v_or_b32_sdwa v30, v47, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_or_b32_sdwa v39, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; VI-NEXT: s_waitcnt vmcnt(3)
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
-; VI-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v34, vcc, 0x300, v2
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v55, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v9, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v41, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v41
-; VI-NEXT: v_or_b32_sdwa v9, v9, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v41, vcc, 0x300, v10
+; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42
+; VI-NEXT: v_or_b32_sdwa v9, v9, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v10
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x300, v55
-; VI-NEXT: v_or_b32_sdwa v10, v39, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v53
-; VI-NEXT: v_or_b32_sdwa v27, v28, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v28, v29, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v29, v30, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v35, v16, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v10, v50, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v50, vcc, 0x300, v52
+; VI-NEXT: v_add_u32_e32 v52, vcc, 0x300, v54
+; VI-NEXT: v_or_b32_sdwa v20, v20, v52 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v21, v21, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10
-; VI-NEXT: v_add_u32_e32 v27, vcc, 0x3000000, v27
-; VI-NEXT: v_add_u32_e32 v28, vcc, 0x3000000, v28
-; VI-NEXT: v_add_u32_e32 v29, vcc, 0x3000000, v29
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x3000000, v20
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x3000000, v21
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v8, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v42, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v42
-; VI-NEXT: v_or_b32_sdwa v8, v8, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v42, vcc, 0x300, v11
-; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v40
-; VI-NEXT: v_or_b32_sdwa v11, v33, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v33, vcc, 0x300, v1
-; VI-NEXT: v_or_b32_sdwa v30, v31, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; VI-NEXT: v_or_b32_sdwa v17, v17, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v43, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v43
+; VI-NEXT: v_or_b32_sdwa v8, v8, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v43, vcc, 0x300, v11
+; VI-NEXT: v_add_u32_e32 v11, vcc, 0x300, v41
+; VI-NEXT: v_or_b32_sdwa v17, v17, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v11, v36, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v35
+; VI-NEXT: v_add_u32_e32 v35, vcc, 0x300, v0
+; VI-NEXT: v_add_u32_e32 v36, vcc, 0x300, v1
+; VI-NEXT: v_or_b32_sdwa v30, v30, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_or_b32_sdwa v31, v31, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x3000000, v30
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31
; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v7, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v44, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v44, vcc, 0x300, v44
; VI-NEXT: v_or_b32_sdwa v7, v7, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
@@ -200942,14 +201641,14 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v6, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v45, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v45, vcc, 0x300, v45
; VI-NEXT: v_or_b32_sdwa v6, v6, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6
@@ -200957,14 +201656,14 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v5, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(1)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v46, v4, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v46, vcc, 0x300, v46
; VI-NEXT: v_or_b32_sdwa v5, v5, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
@@ -200972,17 +201671,17 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v3, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v4, vcc, 3, v4
; VI-NEXT: v_or_b32_sdwa v4, v47, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v32
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x300, v4
; VI-NEXT: v_or_b32_sdwa v4, v3, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v4
; VI-NEXT: s_waitcnt vmcnt(0)
-; VI-NEXT: v_or_b32_sdwa v47, v32, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v47, vcc, 3, v47
+; VI-NEXT: v_or_b32_sdwa v47, v56, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_e32 v47, s4, v47
; VI-NEXT: s_and_b32 s4, s26, 0xff
; VI-NEXT: s_or_b32 s4, s5, s4
@@ -200996,34 +201695,25 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_or_b32 s8, s9, s8
; VI-NEXT: s_and_b32 s9, s16, 0xff
; VI-NEXT: s_or_b32 s9, s10, s9
-; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v56
; VI-NEXT: s_addk_i32 s5, 0x300
; VI-NEXT: s_addk_i32 s7, 0x300
; VI-NEXT: s_addk_i32 s9, 0x300
-; VI-NEXT: v_or_b32_sdwa v15, v15, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_or_b32_sdwa v32, v16, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_lshl_b32 s4, s4, 16
; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_and_b32 s9, s9, 0xffff
; VI-NEXT: s_and_b32 s7, s7, 0xffff
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: v_add_u32_e32 v16, vcc, 0x3000000, v17
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x3000000, v32
-; VI-NEXT: v_add_u32_e32 v32, vcc, 0x300, v0
; VI-NEXT: s_or_b32 s8, s8, s9
; VI-NEXT: s_or_b32 s6, s6, s7
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s8, s8, 0x3000000
; VI-NEXT: s_add_i32 s6, s6, 0x3000000
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
-; VI-NEXT: v_or_b32_sdwa v31, v31, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v47
-; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v15
; VI-NEXT: v_mov_b32_e32 v0, s8
; VI-NEXT: v_mov_b32_e32 v1, s6
; VI-NEXT: v_mov_b32_e32 v2, s4
-; VI-NEXT: v_add_u32_e32 v31, vcc, 0x3000000, v31
; VI-NEXT: .LBB97_3: ; %end
; VI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
@@ -201044,38 +201734,42 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB97_4:
-; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v61, v60
-; VI-NEXT: v_mov_b32_e32 v60, v59
-; VI-NEXT: v_mov_b32_e32 v45, v62
-; VI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v57, v5
-; VI-NEXT: v_mov_b32_e32 v47, v4
-; VI-NEXT: v_mov_b32_e32 v63, v3
-; VI-NEXT: v_mov_b32_e32 v53, v28
-; VI-NEXT: v_mov_b32_e32 v43, v27
-; VI-NEXT: v_mov_b32_e32 v55, v26
-; VI-NEXT: v_mov_b32_e32 v41, v24
-; VI-NEXT: v_mov_b32_e32 v54, v22
+; VI-NEXT: v_mov_b32_e32 v63, v6
+; VI-NEXT: v_mov_b32_e32 v59, v5
+; VI-NEXT: v_mov_b32_e32 v58, v57
+; VI-NEXT: v_mov_b32_e32 v47, v32
+; VI-NEXT: v_mov_b32_e32 v57, v40
+; VI-NEXT: v_mov_b32_e32 v32, v30
+; VI-NEXT: v_mov_b32_e32 v44, v42
+; VI-NEXT: v_mov_b32_e32 v62, v43
+; VI-NEXT: v_mov_b32_e32 v43, v31
+; VI-NEXT: v_mov_b32_e32 v42, v41
+; VI-NEXT: v_mov_b32_e32 v41, v29
+; VI-NEXT: v_mov_b32_e32 v56, v4
+; VI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v49, v26
+; VI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; VI-NEXT: v_mov_b32_e32 v39, v22
+; VI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; VI-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB97_2
+; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB97_2
+; VI-NEXT: s_branch .LBB97_3
;
; GFX9-LABEL: bitcast_v128i8_to_v64i16_scalar:
; GFX9: ; %bb.0:
@@ -201096,16 +201790,18 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:484 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:332
; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:8
@@ -201130,93 +201826,97 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:160
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:168
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:176
-; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v1
-; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
+; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v19
; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v3
-; GFX9-NEXT: v_lshlrev_b32_e32 v46, 8, v5
-; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v7
-; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v9
-; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v11
-; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v13
-; GFX9-NEXT: v_lshlrev_b32_e32 v28, 8, v15
-; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v17
-; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25
-; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v27
-; GFX9-NEXT: v_lshlrev_b32_e32 v19, 8, v19
-; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v23
-; GFX9-NEXT: s_waitcnt vmcnt(24)
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v23
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v25
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v27
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v29
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v3
+; GFX9-NEXT: v_lshlrev_b32_e32 v20, 8, v5
+; GFX9-NEXT: v_lshlrev_b32_e32 v14, 8, v7
+; GFX9-NEXT: v_lshlrev_b32_e32 v16, 8, v9
+; GFX9-NEXT: v_lshlrev_b32_e32 v22, 8, v11
+; GFX9-NEXT: v_lshlrev_b32_e32 v8, 8, v13
+; GFX9-NEXT: v_lshlrev_b32_e32 v12, 8, v15
+; GFX9-NEXT: v_lshlrev_b32_e32 v10, 8, v17
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_waitcnt vmcnt(28)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v43
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v45
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v44
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v42
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v41
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v40
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v55
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v54
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v53
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v52
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v51
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v50
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v49
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:492 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v48
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v39
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v30
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v31
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v32
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v33
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v34
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:472 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:448 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v35
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v36
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:480 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v37
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(23)
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(27)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v38
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:184
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:192
@@ -201226,31 +201926,32 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:224
; GFX9-NEXT: buffer_load_ushort v9, off, s[0:3], s32 offset:232
; GFX9-NEXT: buffer_load_ushort v7, off, s[0:3], s32 offset:240
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_lshlrev_b32_e32 v43, 8, v21
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(7)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:488 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:612 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:544 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v7
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:460 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:248
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:256
@@ -201264,148 +201965,145 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:556 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:588 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v15
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:572 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v3
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:552 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v13
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:568 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:600 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:580 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(6)
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v9
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:564 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:560 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:592 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_load_ushort v3, off, s[0:3], s32 offset:312
; GFX9-NEXT: s_nop 0
; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:320
; GFX9-NEXT: buffer_load_ushort v5, off, s[0:3], s32 offset:328
-; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
-; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:12
-; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20
-; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:324
+; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:316
+; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:308
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:300
+; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:292
+; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:284
+; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:276
+; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:268
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:260
+; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:252
+; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:244
+; GFX9-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:236
+; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:228
+; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:220
+; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:212
+; GFX9-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:204
+; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:196
+; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:188
+; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:180
+; GFX9-NEXT: buffer_load_ushort v26, off, s[0:3], s32 offset:172
+; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:164
+; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:156
+; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:148
+; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:140
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:132
+; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:124
+; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:116
+; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:108
+; GFX9-NEXT: buffer_load_ushort v23, off, s[0:3], s32 offset:100
+; GFX9-NEXT: buffer_load_ushort v21, off, s[0:3], s32 offset:92
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:84
+; GFX9-NEXT: buffer_load_ushort v19, off, s[0:3], s32 offset:76
+; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
+; GFX9-NEXT: buffer_load_ushort v17, off, s[0:3], s32 offset:60
+; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:52
+; GFX9-NEXT: buffer_load_ushort v24, off, s[0:3], s32 offset:44
; GFX9-NEXT: buffer_load_ushort v13, off, s[0:3], s32 offset:36
-; GFX9-NEXT: s_waitcnt vmcnt(15)
+; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:28
+; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v53, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v11, off, s[0:3], s32 offset:4
+; GFX9-NEXT: s_waitcnt vmcnt(51)
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7
-; GFX9-NEXT: s_waitcnt vmcnt(7)
-; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v3
-; GFX9-NEXT: s_waitcnt vmcnt(6)
-; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v1
-; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:44
-; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:52
-; GFX9-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:60
-; GFX9-NEXT: buffer_load_ushort v15, off, s[0:3], s32 offset:68
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:84
-; GFX9-NEXT: buffer_load_ushort v25, off, s[0:3], s32 offset:92
-; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:100
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:108
-; GFX9-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:116
-; GFX9-NEXT: buffer_load_ushort v43, off, s[0:3], s32 offset:124
-; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:132
-; GFX9-NEXT: buffer_load_ushort v56, off, s[0:3], s32 offset:140
-; GFX9-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:148
-; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:156
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:164
-; GFX9-NEXT: s_waitcnt vmcnt(21)
-; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v5
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:576 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:172
-; GFX9-NEXT: buffer_load_ushort v32, off, s[0:3], s32 offset:180
-; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:188
-; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:196
-; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:204
-; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:212
-; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:220
-; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:228
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:236
-; GFX9-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:244
-; GFX9-NEXT: buffer_load_ushort v29, off, s[0:3], s32 offset:252
-; GFX9-NEXT: buffer_load_ushort v27, off, s[0:3], s32 offset:260
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:268
-; GFX9-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:276
-; GFX9-NEXT: buffer_load_ushort v59, off, s[0:3], s32 offset:284
-; GFX9-NEXT: buffer_load_ushort v63, off, s[0:3], s32 offset:292
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:300
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:308
-; GFX9-NEXT: buffer_load_ushort v58, off, s[0:3], s32 offset:316
-; GFX9-NEXT: buffer_load_ushort v1, off, s[0:3], s32 offset:324
-; GFX9-NEXT: s_nop 0
-; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:620 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:624 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:628 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:632 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:636 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:508 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(28)
-; GFX9-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(30)
-; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:512 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(33)
-; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:516 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(36)
-; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:520 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(39)
-; GFX9-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
-; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(43)
+; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v3
+; GFX9-NEXT: s_waitcnt vmcnt(42)
+; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v1
; GFX9-NEXT: s_waitcnt vmcnt(41)
-; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
+; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
+; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:640 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:644 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:648 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:652 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:656 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:660 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:664 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:668 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:672 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:676 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:680 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:528 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:684 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:688 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:692 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:700 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:704 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:708 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:712 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:716 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:720 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:724 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:728 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:732 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:736 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:740 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:744 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:748 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:752 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 offset:756 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:760 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:764 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:768 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:532 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:772 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:776 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v55, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:548 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:780 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:540 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:784 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:788 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:792 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(54)
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:796 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:800 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(55)
+; GFX9-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:804 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:808 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:812 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:816 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:820 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:824 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:828 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:832 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:836 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:840 ; 4-byte Folded Spill
+; GFX9-NEXT: s_waitcnt vmcnt(56)
+; GFX9-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:844 ; 4-byte Folded Spill
; GFX9-NEXT: s_cbranch_scc0 .LBB97_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
@@ -201413,17 +202111,13 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
; GFX9-NEXT: v_and_b32_e32 v3, s4, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX9-NEXT: v_or_b32_sdwa v2, v0, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v4, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v6, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v4, v4, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v8, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v10, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v2, v0, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v6, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -201448,266 +202142,279 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v0, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v2, v39, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v39, v16
-; GFX9-NEXT: v_or_b32_sdwa v17, v34, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_or_b32_sdwa v2, v33, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v33, v32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v1, v11, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v55, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v54, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v42, v61
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v55, v1
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v13, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v52, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v36, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v24, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v50, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v57, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v17, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v49, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v15, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v17, v23, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_mov_b32_e32 v63, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v16, v2, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v21, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v17, v17, 16, v1
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_mov_b32_e32 v33, v45
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v45, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v51, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v18, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v43, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v35, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v19, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v35, v61
+; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v25, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v31, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v20, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v49, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v61, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v29, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v21, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v32, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v28, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v22, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v30, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v34, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v23, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(4)
-; GFX9-NEXT: v_mov_b32_e32 v46, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v42, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v24, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v35, v45
-; GFX9-NEXT: v_mov_b32_e32 v45, v61
-; GFX9-NEXT: v_mov_b32_e32 v61, v42
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_mov_b32_e32 v38, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v44, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v30, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v25, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v54, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v54, v2
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v41, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v46, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v26, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v27, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v29, v41 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v27, v42 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v40, v43 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v27, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v60, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v1, v57, v44 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v56, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v59, v60 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v45, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_lshl_or_b32 v28, v1, 16, v0
+; GFX9-NEXT: v_mov_b32_e32 v45, v34
+; GFX9-NEXT: v_mov_b32_e32 v34, v38
+; GFX9-NEXT: v_mov_b32_e32 v56, v39
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v41, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v29, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v41, v43
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v60, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v40, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v47, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v30, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v60, v49
+; GFX9-NEXT: v_mov_b32_e32 v47, v61
+; GFX9-NEXT: v_mov_b32_e32 v49, v48
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v37, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v31, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_mov_b64 s[4:5], 0
; GFX9-NEXT: s_branch .LBB97_3
; GFX9-NEXT: .LBB97_2:
-; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
-; GFX9-NEXT: v_mov_b32_e32 v33, v45
-; GFX9-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
-; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: buffer_load_dword v55, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:616 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:608 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v33, v32
+; GFX9-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:612 ; 4-byte Folded Reload
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX9-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
+; GFX9-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:564 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:568 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:572 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:560 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:576 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:556 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:552 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:592 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:604 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:588 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:600 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v47, off, s[0:3], s32 offset:584 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:596 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:580 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v35, v61
; GFX9-NEXT: .LBB97_3: ; %Flow
-; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:848 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:548 ; 4-byte Folded Reload
; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GFX9-NEXT: s_cbranch_vccnz .LBB97_5
; GFX9-NEXT: ; %bb.4: ; %cmp.true
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
@@ -201725,59 +202432,66 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_lshl_b32 s9, s17, 8
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_lshl_b32 s10, s19, 8
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(15)
+; GFX9-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:752 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:744 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:740 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:488 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:724 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(13)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(14)
+; GFX9-NEXT: s_waitcnt vmcnt(12)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(12)
+; GFX9-NEXT: s_waitcnt vmcnt(9)
+; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
+; GFX9-NEXT: s_waitcnt vmcnt(8)
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
-; GFX9-NEXT: s_waitcnt vmcnt(11)
+; GFX9-NEXT: s_waitcnt vmcnt(7)
+; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
+; GFX9-NEXT: v_or_b32_sdwa v20, v49, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(5)
+; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
+; GFX9-NEXT: v_or_b32_sdwa v24, v41, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
+; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v25, v37, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v37, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v23, v42, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
+; GFX9-NEXT: v_or_b32_sdwa v21, v47, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v22, v46, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v25, v32, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21
+; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:628 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:732 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(3)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v38, v38, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:644 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:748 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:648 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:512 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_mov_b32_e32 v3, 0xffff
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
; GFX9-NEXT: v_and_b32_e32 v3, s4, v3
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:632 ; 4-byte Folded Reload
; GFX9-NEXT: v_lshl_or_b32 v3, v2, 16, v3
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:736 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_and_b32 s4, s24, 0xff
@@ -201791,8 +202505,6 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: s_and_b32 s8, s16, 0xff
; GFX9-NEXT: s_or_b32 s8, s9, s8
; GFX9-NEXT: s_and_b32 s9, s18, 0xff
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_or_b32 s9, s10, s9
; GFX9-NEXT: s_addk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s5, 0x300
@@ -201809,14 +202521,14 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:620 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:712 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:704 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -201824,14 +202536,14 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:492 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:496 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -201839,264 +202551,240 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:680 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:484 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v37, v44, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:728 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v37, 0x300, v37
; GFX9-NEXT: s_waitcnt vmcnt(2)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:652 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:500 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v63, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v43, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_lshl_or_b32 v10, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:504 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:844 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v38, v51, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:716 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:836 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:840 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v39, v50, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:720 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v48, v60, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:700 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v58, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v54, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v40, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v49, v45, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:708 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v55, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v12, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:828 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:832 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v42, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v55, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:544 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:792 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:788 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v51, v62, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:696 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:784 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v39, v36, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:692 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:684 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v53, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_lshl_or_b32 v13, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:820 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:824 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v48, v46, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:688 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:456 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v49, v35, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:668 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:452 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54
+; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v50, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:672 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
-; GFX9-NEXT: v_or_b32_sdwa v2, v16, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:528 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
-; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:676 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v51, v34, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:660 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
-; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:520 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:768 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16
-; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2
-; GFX9-NEXT: s_waitcnt vmcnt(2)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: v_or_b32_sdwa v52, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:656 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:532 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v53, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:480 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v61
-; GFX9-NEXT: v_or_b32_sdwa v24, v54, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:664 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v24
; GFX9-NEXT: v_add_u32_e32 v24, 0x300, v48
; GFX9-NEXT: v_add_u32_e32 v48, 0x300, v51
+; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41
; GFX9-NEXT: v_and_b32_e32 v24, 0xffff, v24
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v54, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:476 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v45
-; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
-; GFX9-NEXT: v_or_b32_sdwa v20, v57, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v55, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:472 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v56
-; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
-; GFX9-NEXT: v_or_b32_sdwa v21, v32, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v28, 0x300, v21
-; GFX9-NEXT: v_add_u32_e32 v21, 0x300, v54
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX9-NEXT: v_and_b32_e32 v28, 0xffff, v28
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v40, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:468 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v47
-; GFX9-NEXT: v_or_b32_sdwa v23, v41, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v41, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:464 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v43
-; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
-; GFX9-NEXT: v_or_b32_sdwa v22, v44, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v35
; GFX9-NEXT: v_add_u32_e32 v35, 0x300, v22
; GFX9-NEXT: v_add_u32_e32 v22, 0x300, v52
-; GFX9-NEXT: v_add_u32_e32 v51, 0x300, v41
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v28, v35, 16, v28
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v42, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:636 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:460 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_or_b32_sdwa v43, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:624 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:516 ; 4-byte Folded Reload
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:448 ; 4-byte Folded Reload
-; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:524 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_lshl_or_b32 v14, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:812 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:848 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:508 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:796 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:816 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v52, 0x300, v43
-; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31
-; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
-; GFX9-NEXT: s_waitcnt vmcnt(1)
; GFX9-NEXT: v_or_b32_sdwa v44, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:444 ; 4-byte Folded Reload
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:640 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(4)
; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v26, 3, v33
-; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1
-; GFX9-NEXT: v_mov_b32_e32 v1, s6
-; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31
-; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:804 ; 4-byte Folded Reload
+; GFX9-NEXT: s_waitcnt vmcnt(3)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:808 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:800 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
+; GFX9-NEXT: v_or_b32_sdwa v16, v17, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v16, 0x300, v16
+; GFX9-NEXT: s_waitcnt vmcnt(4)
+; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
+; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v45, v27, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v27, 0x300, v23
; GFX9-NEXT: v_add_u32_e32 v26, 0x300, v25
-; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
-; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:760 ; 4-byte Folded Reload
-; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2
; GFX9-NEXT: v_add_u32_e32 v25, 0x300, v38
; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v50
; GFX9-NEXT: v_add_u32_e32 v38, 0x300, v39
; GFX9-NEXT: v_add_u32_e32 v39, 0x300, v49
; GFX9-NEXT: v_add_u32_e32 v49, 0x300, v53
; GFX9-NEXT: v_add_u32_e32 v50, 0x300, v55
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v56, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_lshl_or_b32 v17, v1, 16, v0
+; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:780 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:540 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
+; GFX9-NEXT: v_or_b32_sdwa v2, v63, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v2, 0x300, v2
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; GFX9-NEXT: v_lshl_or_b32 v16, v16, 16, v2
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:536 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v34, 0x300, v20
+; GFX9-NEXT: v_add_u32_e32 v20, 0x300, v40
; GFX9-NEXT: v_add_u32_e32 v53, 0x300, v45
+; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX9-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX9-NEXT: v_and_b32_e32 v26, 0xffff, v26
; GFX9-NEXT: v_and_b32_e32 v27, 0xffff, v27
-; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30
-; GFX9-NEXT: v_mov_b32_e32 v2, s4
+; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v23
@@ -202104,10 +202792,32 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v25, v38, 16, v25
; GFX9-NEXT: v_lshl_or_b32 v26, v37, 16, v26
; GFX9-NEXT: v_lshl_or_b32 v27, v36, 16, v27
+; GFX9-NEXT: s_waitcnt vmcnt(2)
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
+; GFX9-NEXT: s_waitcnt vmcnt(1)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:764 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v31, 0x300, v0
+; GFX9-NEXT: v_and_b32_e32 v31, 0xffff, v31
+; GFX9-NEXT: v_mov_b32_e32 v0, s8
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
+; GFX9-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:772 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v32, 0x300, v1
+; GFX9-NEXT: v_mov_b32_e32 v1, s6
+; GFX9-NEXT: v_lshl_or_b32 v31, v32, 16, v31
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
+; GFX9-NEXT: v_or_b32_sdwa v2, v18, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v30, 0x300, v2
+; GFX9-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; GFX9-NEXT: v_mov_b32_e32 v2, s4
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v18, 3, v18
; GFX9-NEXT: v_or_b32_sdwa v18, v19, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:756 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:776 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v33, 0x300, v18
; GFX9-NEXT: v_add_u32_e32 v18, 0x300, v44
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
@@ -202115,7 +202825,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX9-NEXT: v_lshl_or_b32 v30, v33, 16, v30
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
-; GFX9-NEXT: v_or_b32_sdwa v19, v60, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v19, v59, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v29, 0x300, v19
; GFX9-NEXT: v_add_u32_e32 v19, 0x300, v42
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
@@ -202283,7 +202993,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v83, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v86, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-TRUE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v97, 8, v2
@@ -202352,38 +203062,38 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB97_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: v_and_b32_e64 v5, 0xffff, s4
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
@@ -202470,12 +203180,12 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v2, s6
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v151
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v149
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-TRUE16-NEXT: v_mov_b32_e32 v3, s7
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v180
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v177
@@ -202578,9 +203288,8 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB97_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB97_3
; GFX11-TRUE16-NEXT: .LBB97_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s29, 8
@@ -202966,7 +203675,9 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: .LBB97_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB97_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB97_2
+; GFX11-TRUE16-NEXT: s_branch .LBB97_3
;
; GFX11-FAKE16-LABEL: bitcast_v128i8_to_v64i16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -203109,7 +203820,7 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v83, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v86, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v85, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(62)
; GFX11-FAKE16-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v97, 8, v2
@@ -203178,38 +203889,38 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB97_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s29, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: v_and_b32_e64 v5, 0xffff, s5
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v5, 0xffff, s4
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v32
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v35
@@ -203296,12 +204007,12 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v21, 0xffff, v15
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v17, 16, v19
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v17, v18, 16, v22
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v2, s6
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v1, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v151
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v149
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v16, v20, 16, v21
-; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-FAKE16-NEXT: v_mov_b32_e32 v3, s7
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(NEXT) | instid1(VALU_DEP_4)
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v180
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v177
@@ -203404,9 +204115,8 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v31, v1, 16, v0
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB97_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB97_3
; GFX11-FAKE16-NEXT: .LBB97_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s28, s28, 3
; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s29, 8
@@ -203792,7 +204502,9 @@ define inreg <64 x i16> @bitcast_v128i8_to_v64i16_scalar(<128 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: .LBB97_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB97_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB97_2
+; GFX11-FAKE16-NEXT: s_branch .LBB97_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -209430,16 +210142,17 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v54
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v28
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v40
-; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v55
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v41
; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v42
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v43
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v55
; SI-NEXT: v_writelane_b32 v62, s6, 0
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
@@ -210577,10 +211290,6 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; kill: killed $sgpr6
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; kill: killed $vgpr1
-; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: ; kill: killed $sgpr6
; SI-NEXT: ; kill: killed $vcc_lo
; SI-NEXT: ; implicit-def: $vcc_lo
; SI-NEXT: ; implicit-def: $sgpr56
@@ -210684,8 +211393,10 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
+; SI-NEXT: ; kill: killed $sgpr6
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
@@ -210699,7 +211410,11 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB99_2
+; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB99_2
+; SI-NEXT: s_branch .LBB99_3
;
; VI-LABEL: bitcast_v64i16_to_v128i8_scalar:
; VI: ; %bb.0:
@@ -210757,8 +211472,9 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s4, v17
; VI-NEXT: v_readfirstlane_b32 s5, v18
; VI-NEXT: v_readfirstlane_b32 s44, v1
-; VI-NEXT: s_and_b64 s[46:47], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s45, v2
+; VI-NEXT: s_and_b64 s[46:47], vcc, exec
+; VI-NEXT: s_mov_b64 vcc, -1
; VI-NEXT: v_writelane_b32 v20, s87, 31
; VI-NEXT: ; implicit-def: $vgpr21 : SGPR spill to VGPR lane
; VI-NEXT: s_cbranch_scc0 .LBB99_4
@@ -211716,8 +212432,6 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: .LBB99_4:
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
-; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr71
; VI-NEXT: ; implicit-def: $sgpr70
; VI-NEXT: ; implicit-def: $sgpr69
@@ -211868,7 +212582,11 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; kill: killed $sgpr46
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB99_2
+; VI-NEXT: ; kill: killed $sgpr46
+; VI-NEXT: ; implicit-def: $sgpr46
+; VI-NEXT: s_andn2_b64 vcc, exec, vcc
+; VI-NEXT: s_cbranch_vccz .LBB99_2
+; VI-NEXT: s_branch .LBB99_3
;
; GFX9-LABEL: bitcast_v64i16_to_v128i8_scalar:
; GFX9: ; %bb.0:
@@ -211931,8 +212649,9 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: v_readfirstlane_b32 s6, v17
; GFX9-NEXT: v_readfirstlane_b32 s7, v18
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[46:47], vcc, exec
+; GFX9-NEXT: s_mov_b64 vcc, -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -212330,8 +213049,6 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: .LBB99_3:
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
-; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr81
; GFX9-NEXT: ; implicit-def: $sgpr71
; GFX9-NEXT: ; implicit-def: $sgpr80
@@ -212474,7 +213191,10 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; kill: killed $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB99_2
+; GFX9-NEXT: ; kill: killed $sgpr46
+; GFX9-NEXT: ; implicit-def: $sgpr46
+; GFX9-NEXT: s_andn2_b64 vcc, exec, vcc
+; GFX9-NEXT: s_cbranch_vccz .LBB99_2
; GFX9-NEXT: .LBB99_4:
; GFX9-NEXT: v_mov_b32_e32 v15, s71
; GFX9-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
@@ -213206,16 +213926,16 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v76, s99, 3
; GFX11-NEXT: v_readfirstlane_b32 s8, v9
; GFX11-NEXT: v_readfirstlane_b32 s9, v10
-; GFX11-NEXT: v_readfirstlane_b32 s6, v11
+; GFX11-NEXT: v_readfirstlane_b32 s4, v11
; GFX11-NEXT: v_writelane_b32 v75, s36, 4
; GFX11-NEXT: v_writelane_b32 v76, s100, 4
-; GFX11-NEXT: v_readfirstlane_b32 s7, v12
-; GFX11-NEXT: v_readfirstlane_b32 s4, v13
-; GFX11-NEXT: v_readfirstlane_b32 s5, v14
+; GFX11-NEXT: v_readfirstlane_b32 s5, v12
+; GFX11-NEXT: v_readfirstlane_b32 s6, v13
+; GFX11-NEXT: v_readfirstlane_b32 s7, v14
; GFX11-NEXT: v_writelane_b32 v75, s37, 5
; GFX11-NEXT: v_writelane_b32 v76, s101, 5
-; GFX11-NEXT: s_mov_b32 s99, 0
; GFX11-NEXT: s_and_b32 s42, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 vcc_lo, -1
; GFX11-NEXT: s_clause 0x12
; GFX11-NEXT: scratch_store_b32 off, v40, s32 offset:72
; GFX11-NEXT: scratch_store_b32 off, v41, s32 offset:68
@@ -213238,8 +213958,8 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: scratch_store_b32 off, v74, s32
; GFX11-NEXT: v_writelane_b32 v75, s38, 6
; GFX11-NEXT: v_writelane_b32 v76, s102, 6
-; GFX11-NEXT: ; implicit-def: $vgpr78 : SGPR spill to VGPR lane
; GFX11-NEXT: ; implicit-def: $vgpr77 : SGPR spill to VGPR lane
+; GFX11-NEXT: ; implicit-def: $vgpr78 : SGPR spill to VGPR lane
; GFX11-NEXT: v_writelane_b32 v75, s39, 7
; GFX11-NEXT: v_writelane_b32 v76, s103, 7
; GFX11-NEXT: v_writelane_b32 v75, s48, 8
@@ -213269,160 +213989,158 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v75, s87, 31
; GFX11-NEXT: s_cbranch_scc0 .LBB99_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b32 s42, s27, 24
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s7, 24
+; GFX11-NEXT: v_writelane_b32 v78, s42, 7
; GFX11-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[0:1], 24
-; GFX11-NEXT: v_writelane_b32 v77, s42, 8
+; GFX11-NEXT: s_lshr_b32 s34, s7, 16
+; GFX11-NEXT: s_lshr_b32 s36, s7, 8
+; GFX11-NEXT: s_lshr_b32 s35, s6, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 6
; GFX11-NEXT: s_lshr_b32 s42, s27, 8
-; GFX11-NEXT: s_lshr_b32 s43, s27, 24
-; GFX11-NEXT: s_lshr_b32 s34, s5, 24
-; GFX11-NEXT: s_lshr_b32 s35, s5, 16
-; GFX11-NEXT: v_writelane_b32 v77, s42, 7
+; GFX11-NEXT: s_lshr_b32 s37, s6, 8
+; GFX11-NEXT: s_lshr_b32 s38, s5, 24
+; GFX11-NEXT: s_lshr_b32 s39, s5, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 5
; GFX11-NEXT: s_lshr_b32 s42, s26, 16
-; GFX11-NEXT: s_lshr_b32 s37, s5, 8
-; GFX11-NEXT: s_lshr_b32 s36, s4, 16
-; GFX11-NEXT: s_lshr_b32 s38, s4, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 6
+; GFX11-NEXT: s_lshr_b32 s49, s5, 8
+; GFX11-NEXT: s_lshr_b32 s48, s4, 16
+; GFX11-NEXT: s_lshr_b32 s50, s4, 8
+; GFX11-NEXT: v_writelane_b32 v78, s42, 4
; GFX11-NEXT: s_lshr_b32 s42, s26, 8
-; GFX11-NEXT: s_lshr_b32 s39, s7, 24
-; GFX11-NEXT: s_lshr_b32 s48, s7, 16
-; GFX11-NEXT: s_lshr_b32 s50, s7, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 5
+; GFX11-NEXT: s_lshr_b32 s51, s9, 24
+; GFX11-NEXT: s_lshr_b32 s52, s9, 16
+; GFX11-NEXT: s_lshr_b32 s54, s9, 8
+; GFX11-NEXT: v_writelane_b32 v78, s42, 3
; GFX11-NEXT: s_lshr_b32 s42, s25, 24
-; GFX11-NEXT: s_lshr_b32 s49, s6, 16
-; GFX11-NEXT: s_lshr_b32 s51, s6, 8
-; GFX11-NEXT: s_lshr_b32 s52, s9, 24
-; GFX11-NEXT: v_writelane_b32 v77, s42, 4
+; GFX11-NEXT: s_lshr_b32 s53, s8, 16
+; GFX11-NEXT: s_lshr_b32 s55, s8, 8
+; GFX11-NEXT: s_lshr_b32 s64, s11, 24
+; GFX11-NEXT: v_writelane_b32 v78, s42, 2
; GFX11-NEXT: s_lshr_b32 s42, s25, 16
-; GFX11-NEXT: s_lshr_b32 s53, s9, 16
-; GFX11-NEXT: s_lshr_b32 s55, s9, 8
-; GFX11-NEXT: s_lshr_b32 s54, s8, 16
-; GFX11-NEXT: v_writelane_b32 v77, s42, 3
+; GFX11-NEXT: s_lshr_b32 s65, s11, 16
+; GFX11-NEXT: s_lshr_b32 s67, s11, 8
+; GFX11-NEXT: s_lshr_b32 s66, s10, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 1
; GFX11-NEXT: s_lshr_b32 s42, s25, 8
-; GFX11-NEXT: s_lshr_b32 s64, s8, 8
-; GFX11-NEXT: s_lshr_b32 s65, s11, 24
-; GFX11-NEXT: s_lshr_b32 s66, s11, 16
-; GFX11-NEXT: v_writelane_b32 v77, s42, 2
+; GFX11-NEXT: s_lshr_b32 s68, s10, 8
+; GFX11-NEXT: s_lshr_b32 s69, s13, 24
+; GFX11-NEXT: s_lshr_b32 s70, s13, 16
+; GFX11-NEXT: v_writelane_b32 v78, s42, 0
; GFX11-NEXT: s_lshr_b32 s42, s24, 16
-; GFX11-NEXT: s_lshr_b32 s68, s11, 8
-; GFX11-NEXT: s_lshr_b32 s67, s10, 16
-; GFX11-NEXT: s_lshr_b32 s69, s10, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 1
+; GFX11-NEXT: s_lshr_b32 s80, s13, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 31
; GFX11-NEXT: s_lshr_b32 s42, s24, 8
-; GFX11-NEXT: s_lshr_b32 s70, s13, 24
-; GFX11-NEXT: s_lshr_b32 s71, s13, 16
-; GFX11-NEXT: s_lshr_b32 s81, s13, 8
-; GFX11-NEXT: v_writelane_b32 v77, s42, 0
+; GFX11-NEXT: s_lshr_b32 s71, s12, 16
+; GFX11-NEXT: s_lshr_b32 s81, s12, 8
+; GFX11-NEXT: s_lshr_b32 s82, s15, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 30
; GFX11-NEXT: s_lshr_b32 s42, s23, 24
-; GFX11-NEXT: s_lshr_b32 s80, s12, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 31
+; GFX11-NEXT: s_lshr_b32 s83, s15, 16
+; GFX11-NEXT: s_lshr_b32 s85, s15, 8
+; GFX11-NEXT: s_lshr_b32 s84, s14, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 29
; GFX11-NEXT: s_lshr_b32 s42, s23, 16
-; GFX11-NEXT: s_lshr_b32 s82, s12, 8
-; GFX11-NEXT: s_lshr_b32 s83, s15, 24
-; GFX11-NEXT: s_lshr_b32 s84, s15, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 30
+; GFX11-NEXT: s_lshr_b32 s86, s14, 8
+; GFX11-NEXT: s_lshr_b32 s87, s41, 24
+; GFX11-NEXT: s_lshr_b32 s96, s41, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 28
; GFX11-NEXT: s_lshr_b32 s42, s23, 8
-; GFX11-NEXT: s_lshr_b32 s86, s15, 8
-; GFX11-NEXT: s_lshr_b32 s85, s14, 16
-; GFX11-NEXT: s_lshr_b32 s87, s14, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 29
+; GFX11-NEXT: s_lshr_b32 s98, s41, 8
+; GFX11-NEXT: s_lshr_b32 s97, s40, 16
+; GFX11-NEXT: s_lshr_b32 s99, s40, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 27
; GFX11-NEXT: s_lshr_b32 s42, s22, 16
-; GFX11-NEXT: s_lshr_b32 s96, s41, 24
-; GFX11-NEXT: s_lshr_b32 s97, s41, 16
-; GFX11-NEXT: s_lshr_b32 s100, s41, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 28
+; GFX11-NEXT: s_lshr_b32 s100, s29, 24
+; GFX11-NEXT: s_lshr_b32 s101, s29, 16
+; GFX11-NEXT: s_lshr_b32 s103, s29, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 26
; GFX11-NEXT: s_lshr_b32 s42, s22, 8
-; GFX11-NEXT: s_lshr_b32 s98, s40, 16
-; GFX11-NEXT: s_lshr_b32 s101, s40, 8
-; GFX11-NEXT: s_lshr_b32 s102, s29, 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 27
+; GFX11-NEXT: s_lshr_b32 s102, s28, 16
+; GFX11-NEXT: s_lshr_b32 s104, s28, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 25
; GFX11-NEXT: s_lshr_b32 s42, s21, 24
-; GFX11-NEXT: s_lshr_b32 s103, s29, 16
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s29, 8
-; GFX11-NEXT: s_lshr_b32 s104, s28, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 26
-; GFX11-NEXT: s_lshr_b32 s42, s21, 16
-; GFX11-NEXT: s_lshr_b64 s[62:63], s[26:27], 24
-; GFX11-NEXT: s_lshr_b64 s[72:73], s[24:25], 24
+; GFX11-NEXT: s_lshr_b64 s[72:73], s[26:27], 24
+; GFX11-NEXT: s_lshr_b64 s[62:63], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[60:61], s[22:23], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 25
-; GFX11-NEXT: s_lshr_b32 s42, s21, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 24
+; GFX11-NEXT: s_lshr_b32 s42, s21, 16
; GFX11-NEXT: s_lshr_b64 s[58:59], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[56:57], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[46:47], s[16:17], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 24
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 23
+; GFX11-NEXT: s_lshr_b32 s42, s21, 8
; GFX11-NEXT: s_lshr_b64 s[44:45], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[76:77], s[6:7], 24
-; GFX11-NEXT: s_lshr_b64 s[78:79], s[8:9], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 23
+; GFX11-NEXT: s_lshr_b64 s[30:31], s[6:7], 24
+; GFX11-NEXT: s_lshr_b64 s[94:95], s[4:5], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 22
+; GFX11-NEXT: s_lshr_b32 s42, s20, 16
+; GFX11-NEXT: s_lshr_b64 s[92:93], s[8:9], 24
+; GFX11-NEXT: s_lshr_b64 s[90:91], s[10:11], 24
+; GFX11-NEXT: s_lshr_b64 s[88:89], s[12:13], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 21
; GFX11-NEXT: s_lshr_b32 s42, s20, 8
-; GFX11-NEXT: s_lshr_b64 s[88:89], s[10:11], 24
-; GFX11-NEXT: s_lshr_b64 s[90:91], s[12:13], 24
-; GFX11-NEXT: s_lshr_b64 s[92:93], s[14:15], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 22
+; GFX11-NEXT: s_lshr_b64 s[78:79], s[14:15], 24
+; GFX11-NEXT: s_lshr_b64 s[76:77], s[40:41], 24
+; GFX11-NEXT: s_lshr_b64 s[74:75], s[28:29], 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 20
; GFX11-NEXT: s_lshr_b32 s42, s19, 24
-; GFX11-NEXT: s_lshr_b64 s[94:95], s[40:41], 24
-; GFX11-NEXT: s_lshr_b64 s[30:31], s[28:29], 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 21
-; GFX11-NEXT: s_lshr_b32 s42, s19, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 20
+; GFX11-NEXT: v_writelane_b32 v77, s42, 19
+; GFX11-NEXT: s_lshr_b32 s42, s19, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 18
; GFX11-NEXT: s_lshr_b32 s42, s19, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 19
-; GFX11-NEXT: s_lshr_b32 s42, s18, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 18
+; GFX11-NEXT: v_writelane_b32 v77, s42, 17
+; GFX11-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 16
; GFX11-NEXT: s_lshr_b32 s42, s18, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 17
-; GFX11-NEXT: s_lshr_b32 s42, s17, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 15
+; GFX11-NEXT: s_lshr_b32 s42, s17, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 14
; GFX11-NEXT: s_lshr_b32 s42, s17, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 15
-; GFX11-NEXT: s_lshr_b32 s42, s17, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 14
+; GFX11-NEXT: v_writelane_b32 v77, s42, 13
+; GFX11-NEXT: s_lshr_b32 s42, s17, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 12
; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 13
-; GFX11-NEXT: s_lshr_b32 s42, s16, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 12
+; GFX11-NEXT: v_writelane_b32 v77, s42, 11
+; GFX11-NEXT: s_lshr_b32 s42, s16, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 10
; GFX11-NEXT: s_lshr_b32 s42, s3, 24
-; GFX11-NEXT: v_writelane_b32 v78, s42, 11
-; GFX11-NEXT: s_lshr_b32 s42, s3, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 10
+; GFX11-NEXT: v_writelane_b32 v77, s42, 9
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 8
; GFX11-NEXT: s_lshr_b32 s42, s3, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 9
-; GFX11-NEXT: s_lshr_b32 s42, s2, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 7
+; GFX11-NEXT: s_lshr_b32 s42, s2, 16
+; GFX11-NEXT: v_writelane_b32 v77, s42, 6
; GFX11-NEXT: s_lshr_b32 s42, s2, 8
-; GFX11-NEXT: v_writelane_b32 v78, s42, 7
-; GFX11-NEXT: s_lshr_b32 s42, s1, 24
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 6
+; GFX11-NEXT: v_writelane_b32 v77, s42, 5
+; GFX11-NEXT: s_lshr_b32 s42, s1, 24
+; GFX11-NEXT: v_writelane_b32 v77, s42, 4
; GFX11-NEXT: s_lshr_b32 s42, s1, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 5
-; GFX11-NEXT: s_lshr_b32 s42, s1, 8
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 4
+; GFX11-NEXT: v_writelane_b32 v77, s42, 3
+; GFX11-NEXT: s_lshr_b32 s42, s1, 8
+; GFX11-NEXT: v_writelane_b32 v77, s42, 2
; GFX11-NEXT: s_lshr_b32 s42, s0, 16
-; GFX11-NEXT: v_writelane_b32 v78, s42, 3
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(SKIP_1) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: v_writelane_b32 v77, s42, 1
; GFX11-NEXT: s_lshr_b32 s42, s0, 8
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_writelane_b32 v78, s42, 2
-; GFX11-NEXT: s_lshr_b32 s42, s28, 8
-; GFX11-NEXT: v_writelane_b32 v78, s74, 0
-; GFX11-NEXT: v_writelane_b32 v78, s75, 1
-; GFX11-NEXT: s_lshr_b64 s[74:75], s[4:5], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s99
-; GFX11-NEXT: s_cbranch_vccnz .LBB99_4
+; GFX11-NEXT: v_writelane_b32 v77, s42, 0
+; GFX11-NEXT: s_lshr_b64 s[42:43], s[0:1], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB99_4
; GFX11-NEXT: .LBB99_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v39, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v38, s16, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v2, s5, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v1, s4, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v2, s7, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v1, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v51, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v50, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v33, s21, 3 op_sel_hi:[1,0]
@@ -213441,8 +214159,8 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v7, s10, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s9, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v4, s7, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v3, s6, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v3, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v53, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v52, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v37, s19, 3 op_sel_hi:[1,0]
@@ -213467,8 +214185,8 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b64 v[34:35], 24, v[11:12]
; GFX11-NEXT: v_lshrrev_b64 v[65:66], 24, v[13:14]
; GFX11-NEXT: v_lshrrev_b64 v[68:69], 24, v[15:16]
-; GFX11-NEXT: v_lshrrev_b32_e32 v147, 24, v21
-; GFX11-NEXT: v_lshrrev_b32_e32 v148, 16, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v148, 24, v21
+; GFX11-NEXT: v_lshrrev_b32_e32 v147, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v149, 8, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v150, 16, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v151, 8, v20
@@ -213549,9 +214267,8 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v146, 8, v15
; GFX11-NEXT: s_branch .LBB99_5
; GFX11-NEXT: .LBB99_3:
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: s_mov_b32 s99, -1
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; kill: killed $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr44
@@ -213560,285 +214277,284 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr58
; GFX11-NEXT: ; implicit-def: $sgpr60
; GFX11-NEXT: ; implicit-def: $sgpr104
-; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr103
; GFX11-NEXT: ; implicit-def: $sgpr102
+; GFX11-NEXT: ; implicit-def: $sgpr103
; GFX11-NEXT: ; implicit-def: $sgpr101
-; GFX11-NEXT: ; implicit-def: $sgpr98
; GFX11-NEXT: ; implicit-def: $sgpr100
+; GFX11-NEXT: ; implicit-def: $sgpr99
; GFX11-NEXT: ; implicit-def: $sgpr97
+; GFX11-NEXT: ; implicit-def: $sgpr98
; GFX11-NEXT: ; implicit-def: $sgpr96
; GFX11-NEXT: ; implicit-def: $sgpr87
-; GFX11-NEXT: ; implicit-def: $sgpr85
; GFX11-NEXT: ; implicit-def: $sgpr86
; GFX11-NEXT: ; implicit-def: $sgpr84
+; GFX11-NEXT: ; implicit-def: $sgpr85
; GFX11-NEXT: ; implicit-def: $sgpr83
; GFX11-NEXT: ; implicit-def: $sgpr82
-; GFX11-NEXT: ; implicit-def: $sgpr80
; GFX11-NEXT: ; implicit-def: $sgpr81
; GFX11-NEXT: ; implicit-def: $sgpr71
+; GFX11-NEXT: ; implicit-def: $sgpr80
; GFX11-NEXT: ; implicit-def: $sgpr70
; GFX11-NEXT: ; implicit-def: $sgpr69
-; GFX11-NEXT: ; implicit-def: $sgpr67
; GFX11-NEXT: ; implicit-def: $sgpr68
; GFX11-NEXT: ; implicit-def: $sgpr66
+; GFX11-NEXT: ; implicit-def: $sgpr67
; GFX11-NEXT: ; implicit-def: $sgpr65
; GFX11-NEXT: ; implicit-def: $sgpr64
-; GFX11-NEXT: ; implicit-def: $sgpr54
; GFX11-NEXT: ; implicit-def: $sgpr55
; GFX11-NEXT: ; implicit-def: $sgpr53
+; GFX11-NEXT: ; implicit-def: $sgpr54
; GFX11-NEXT: ; implicit-def: $sgpr52
; GFX11-NEXT: ; implicit-def: $sgpr51
-; GFX11-NEXT: ; implicit-def: $sgpr49
; GFX11-NEXT: ; implicit-def: $sgpr50
; GFX11-NEXT: ; implicit-def: $sgpr48
+; GFX11-NEXT: ; implicit-def: $sgpr49
; GFX11-NEXT: ; implicit-def: $sgpr39
; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr36
; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr35
+; GFX11-NEXT: ; implicit-def: $sgpr36
; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr72
+; GFX11-NEXT: ; implicit-def: $vcc_hi
; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr92
-; GFX11-NEXT: ; implicit-def: $sgpr90
-; GFX11-NEXT: ; implicit-def: $sgpr88
-; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr72
; GFX11-NEXT: ; implicit-def: $sgpr74
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: v_writelane_b32 v78, s42, 0
-; GFX11-NEXT: v_writelane_b32 v78, s43, 1
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
-; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: ; kill: killed $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr78
+; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr90
+; GFX11-NEXT: ; implicit-def: $sgpr92
+; GFX11-NEXT: ; implicit-def: $sgpr94
+; GFX11-NEXT: ; implicit-def: $sgpr30
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; kill: killed $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB99_2
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; kill: killed $sgpr43
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
+; GFX11-NEXT: s_cbranch_vccz .LBB99_2
; GFX11-NEXT: .LBB99_4:
; GFX11-NEXT: v_dual_mov_b32 v52, s0 :: v_dual_mov_b32 v53, s1
-; GFX11-NEXT: v_readlane_b32 s0, v78, 2
-; GFX11-NEXT: v_mov_b32_e32 v71, s50
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(SKIP_3) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_readlane_b32 s0, v77, 0
+; GFX11-NEXT: v_mov_b32_e32 v71, s49
; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
; GFX11-NEXT: v_dual_mov_b32 v13, s40 :: v_dual_mov_b32 v14, s41
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v74, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 3
+; GFX11-NEXT: v_readlane_b32 s0, v77, 1
; GFX11-NEXT: v_dual_mov_b32 v11, s14 :: v_dual_mov_b32 v12, s15
; GFX11-NEXT: v_dual_mov_b32 v9, s12 :: v_dual_mov_b32 v10, s13
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_mov_b32_e32 v73, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 4
-; GFX11-NEXT: v_mov_b32_e32 v55, s48
+; GFX11-NEXT: v_readlane_b32 s0, v77, 2
+; GFX11-NEXT: v_mov_b32_e32 v55, s39
; GFX11-NEXT: v_dual_mov_b32 v7, s10 :: v_dual_mov_b32 v8, s11
; GFX11-NEXT: v_dual_mov_b32 v5, s8 :: v_dual_mov_b32 v6, s9
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_4) | instid1(VALU_DEP_4)
; GFX11-NEXT: v_mov_b32_e32 v72, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 5
-; GFX11-NEXT: v_mov_b32_e32 v49, s39
-; GFX11-NEXT: v_dual_mov_b32 v3, s6 :: v_dual_mov_b32 v4, s7
-; GFX11-NEXT: v_dual_mov_b32 v1, s4 :: v_dual_mov_b32 v2, s5
+; GFX11-NEXT: v_readlane_b32 s0, v77, 3
+; GFX11-NEXT: v_mov_b32_e32 v49, s38
+; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v62, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 6
+; GFX11-NEXT: v_readlane_b32 s0, v77, 4
; GFX11-NEXT: v_dual_mov_b32 v50, s2 :: v_dual_mov_b32 v51, s3
; GFX11-NEXT: v_dual_mov_b32 v38, s16 :: v_dual_mov_b32 v39, s17
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v63, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 7
-; GFX11-NEXT: v_dual_mov_b32 v35, s38 :: v_dual_mov_b32 v36, s18
+; GFX11-NEXT: v_readlane_b32 s0, v77, 5
+; GFX11-NEXT: v_dual_mov_b32 v35, s37 :: v_dual_mov_b32 v36, s18
; GFX11-NEXT: v_dual_mov_b32 v37, s19 :: v_dual_mov_b32 v32, s20
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_dual_mov_b32 v33, s21 :: v_dual_mov_b32 v60, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 8
+; GFX11-NEXT: v_readlane_b32 s0, v77, 6
; GFX11-NEXT: v_dual_mov_b32 v28, s22 :: v_dual_mov_b32 v29, s23
; GFX11-NEXT: v_dual_mov_b32 v24, s24 :: v_dual_mov_b32 v25, s25
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
; GFX11-NEXT: v_mov_b32_e32 v61, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 9
+; GFX11-NEXT: v_readlane_b32 s0, v77, 7
; GFX11-NEXT: v_dual_mov_b32 v20, s26 :: v_dual_mov_b32 v21, s27
-; GFX11-NEXT: v_dual_mov_b32 v146, s42 :: v_dual_mov_b32 v145, s104
-; GFX11-NEXT: v_mov_b32_e32 v59, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 10
-; GFX11-NEXT: v_dual_mov_b32 v144, vcc_hi :: v_dual_mov_b32 v135, s103
-; GFX11-NEXT: v_dual_mov_b32 v134, s102 :: v_dual_mov_b32 v133, s101
+; GFX11-NEXT: v_dual_mov_b32 v146, s104 :: v_dual_mov_b32 v145, s102
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v59, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 8
+; GFX11-NEXT: v_dual_mov_b32 v144, s103 :: v_dual_mov_b32 v135, s101
+; GFX11-NEXT: v_dual_mov_b32 v134, s100 :: v_dual_mov_b32 v133, s99
; GFX11-NEXT: v_mov_b32_e32 v57, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 11
-; GFX11-NEXT: v_dual_mov_b32 v31, s36 :: v_dual_mov_b32 v132, s98
-; GFX11-NEXT: v_dual_mov_b32 v131, s100 :: v_dual_mov_b32 v130, s97
-; GFX11-NEXT: v_dual_mov_b32 v129, s96 :: v_dual_mov_b32 v58, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 12
-; GFX11-NEXT: v_dual_mov_b32 v27, s37 :: v_dual_mov_b32 v128, s87
-; GFX11-NEXT: v_dual_mov_b32 v119, s85 :: v_dual_mov_b32 v118, s86
+; GFX11-NEXT: v_readlane_b32 s0, v77, 9
+; GFX11-NEXT: v_dual_mov_b32 v31, s35 :: v_dual_mov_b32 v132, s97
+; GFX11-NEXT: v_dual_mov_b32 v131, s98 :: v_dual_mov_b32 v130, s96
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v117, s84 :: v_dual_mov_b32 v56, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 13
-; GFX11-NEXT: v_dual_mov_b32 v116, s83 :: v_dual_mov_b32 v115, s82
-; GFX11-NEXT: v_dual_mov_b32 v114, s80 :: v_dual_mov_b32 v113, s81
-; GFX11-NEXT: v_mov_b32_e32 v47, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 14
-; GFX11-NEXT: v_dual_mov_b32 v23, s35 :: v_dual_mov_b32 v112, s71
-; GFX11-NEXT: v_dual_mov_b32 v103, s70 :: v_dual_mov_b32 v102, s69
+; GFX11-NEXT: v_dual_mov_b32 v129, s87 :: v_dual_mov_b32 v58, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 10
+; GFX11-NEXT: v_dual_mov_b32 v27, s36 :: v_dual_mov_b32 v128, s86
+; GFX11-NEXT: v_dual_mov_b32 v119, s84 :: v_dual_mov_b32 v118, s85
+; GFX11-NEXT: v_dual_mov_b32 v117, s83 :: v_dual_mov_b32 v56, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 11
+; GFX11-NEXT: v_dual_mov_b32 v116, s82 :: v_dual_mov_b32 v115, s81
+; GFX11-NEXT: v_dual_mov_b32 v114, s71 :: v_dual_mov_b32 v113, s80
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v101, s67 :: v_dual_mov_b32 v46, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 15
-; GFX11-NEXT: v_dual_mov_b32 v19, s34 :: v_dual_mov_b32 v100, s68
-; GFX11-NEXT: v_dual_mov_b32 v99, s66 :: v_dual_mov_b32 v98, s65
-; GFX11-NEXT: v_dual_mov_b32 v97, s64 :: v_dual_mov_b32 v44, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 16
-; GFX11-NEXT: v_dual_mov_b32 v96, s54 :: v_dual_mov_b32 v87, s55
-; GFX11-NEXT: v_dual_mov_b32 v86, s53 :: v_dual_mov_b32 v85, s52
+; GFX11-NEXT: v_mov_b32_e32 v47, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 12
+; GFX11-NEXT: v_dual_mov_b32 v23, s34 :: v_dual_mov_b32 v112, s70
+; GFX11-NEXT: v_dual_mov_b32 v103, s69 :: v_dual_mov_b32 v102, s68
+; GFX11-NEXT: v_dual_mov_b32 v101, s66 :: v_dual_mov_b32 v46, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 13
+; GFX11-NEXT: v_dual_mov_b32 v19, vcc_hi :: v_dual_mov_b32 v100, s67
+; GFX11-NEXT: v_dual_mov_b32 v99, s65 :: v_dual_mov_b32 v98, s64
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_dual_mov_b32 v97, s55 :: v_dual_mov_b32 v44, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 14
+; GFX11-NEXT: v_dual_mov_b32 v96, s53 :: v_dual_mov_b32 v87, s54
+; GFX11-NEXT: v_dual_mov_b32 v86, s52 :: v_dual_mov_b32 v85, s51
; GFX11-NEXT: v_mov_b32_e32 v45, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 17
-; GFX11-NEXT: v_dual_mov_b32 v84, s51 :: v_dual_mov_b32 v83, s49
-; GFX11-NEXT: v_dual_mov_b32 v147, s43 :: v_dual_mov_b32 v22, s78
-; GFX11-NEXT: v_mov_b32_e32 v43, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 18
-; GFX11-NEXT: v_dual_mov_b32 v67, s58 :: v_dual_mov_b32 v26, s88
-; GFX11-NEXT: v_dual_mov_b32 v81, s44 :: v_dual_mov_b32 v30, s90
+; GFX11-NEXT: v_readlane_b32 s0, v77, 15
+; GFX11-NEXT: v_dual_mov_b32 v84, s50 :: v_dual_mov_b32 v83, s48
+; GFX11-NEXT: v_dual_mov_b32 v67, s58 :: v_dual_mov_b32 v26, s90
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_3) | instid1(VALU_DEP_3)
+; GFX11-NEXT: v_mov_b32_e32 v43, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 16
+; GFX11-NEXT: v_dual_mov_b32 v81, s44 :: v_dual_mov_b32 v30, s88
+; GFX11-NEXT: v_dual_mov_b32 v17, s30 :: v_dual_mov_b32 v34, s78
; GFX11-NEXT: v_mov_b32_e32 v42, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 19
-; GFX11-NEXT: v_dual_mov_b32 v17, s74 :: v_dual_mov_b32 v34, s92
-; GFX11-NEXT: v_dual_mov_b32 v65, s94 :: v_dual_mov_b32 v68, s30
-; GFX11-NEXT: v_mov_b32_e32 v41, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 20
-; GFX11-NEXT: v_mov_b32_e32 v48, s62
-; GFX11-NEXT: v_mov_b32_e32 v54, s72
+; GFX11-NEXT: v_readlane_b32 s0, v77, 17
+; GFX11-NEXT: v_dual_mov_b32 v65, s76 :: v_dual_mov_b32 v68, s74
+; GFX11-NEXT: v_mov_b32_e32 v48, s72
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_4) | instid1(VALU_DEP_4)
+; GFX11-NEXT: v_dual_mov_b32 v54, s62 :: v_dual_mov_b32 v41, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 18
; GFX11-NEXT: v_mov_b32_e32 v64, s60
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4) | instskip(SKIP_3) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_dual_mov_b32 v70, s56 :: v_dual_mov_b32 v183, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 21
+; GFX11-NEXT: v_mov_b32_e32 v70, s56
; GFX11-NEXT: v_mov_b32_e32 v80, s46
-; GFX11-NEXT: v_mov_b32_e32 v18, s76
+; GFX11-NEXT: v_dual_mov_b32 v82, s42 :: v_dual_mov_b32 v183, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 19
+; GFX11-NEXT: v_mov_b32_e32 v18, s94
+; GFX11-NEXT: v_mov_b32_e32 v22, s92
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v40, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 22
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_readlane_b32 s0, v77, 20
; GFX11-NEXT: v_mov_b32_e32 v182, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 23
-; GFX11-NEXT: v_mov_b32_e32 v181, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 24
+; GFX11-NEXT: v_readlane_b32 s0, v77, 21
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v181, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 22
; GFX11-NEXT: v_mov_b32_e32 v180, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 25
-; GFX11-NEXT: v_mov_b32_e32 v178, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 26
+; GFX11-NEXT: v_readlane_b32 s0, v77, 23
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v178, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 24
; GFX11-NEXT: v_mov_b32_e32 v179, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 27
-; GFX11-NEXT: v_mov_b32_e32 v177, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 28
+; GFX11-NEXT: v_readlane_b32 s0, v77, 25
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v177, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 26
; GFX11-NEXT: v_mov_b32_e32 v176, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 29
-; GFX11-NEXT: v_mov_b32_e32 v167, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 30
+; GFX11-NEXT: v_readlane_b32 s0, v77, 27
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v167, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 28
; GFX11-NEXT: v_mov_b32_e32 v165, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 31
-; GFX11-NEXT: v_mov_b32_e32 v166, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 29
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v166, s0
+; GFX11-NEXT: v_readlane_b32 s0, v77, 30
; GFX11-NEXT: v_mov_b32_e32 v164, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 1
-; GFX11-NEXT: v_mov_b32_e32 v163, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 2
+; GFX11-NEXT: v_readlane_b32 s0, v77, 31
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v163, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 0
; GFX11-NEXT: v_mov_b32_e32 v162, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 3
-; GFX11-NEXT: v_mov_b32_e32 v160, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 4
+; GFX11-NEXT: v_readlane_b32 s0, v78, 1
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v160, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 2
; GFX11-NEXT: v_mov_b32_e32 v161, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 5
-; GFX11-NEXT: v_mov_b32_e32 v151, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 6
+; GFX11-NEXT: v_readlane_b32 s0, v78, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
+; GFX11-NEXT: v_mov_b32_e32 v151, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 4
; GFX11-NEXT: v_mov_b32_e32 v150, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 7
+; GFX11-NEXT: v_readlane_b32 s0, v78, 5
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v149, s0
-; GFX11-NEXT: v_readlane_b32 s0, v77, 8
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_2) | instid1(VALU_DEP_2)
+; GFX11-NEXT: v_readlane_b32 s0, v78, 6
+; GFX11-NEXT: v_mov_b32_e32 v147, s0
+; GFX11-NEXT: v_readlane_b32 s0, v78, 7
+; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_mov_b32_e32 v148, s0
-; GFX11-NEXT: v_readlane_b32 s0, v78, 0
-; GFX11-NEXT: v_readlane_b32 s1, v78, 1
-; GFX11-NEXT: v_mov_b32_e32 v82, s0
; GFX11-NEXT: .LBB99_5: ; %end
; GFX11-NEXT: v_lshlrev_b32_e32 v69, 8, v74
; GFX11-NEXT: v_and_b32_e32 v52, 0xff, v52
-; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT: v_lshlrev_b32_e32 v66, 8, v82
; GFX11-NEXT: v_and_b32_e32 v53, 0xff, v53
; GFX11-NEXT: v_lshlrev_b32_e32 v82, 8, v63
@@ -213967,8 +214683,8 @@ define inreg <128 x i8> @bitcast_v64i16_to_v128i8_scalar(<64 x i16> inreg %a, i3
; GFX11-NEXT: v_lshlrev_b32_e32 v33, 8, v48
; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v21
; GFX11-NEXT: v_lshlrev_b32_e32 v48, 8, v149
-; GFX11-NEXT: v_and_b32_e32 v50, 0xff, v148
-; GFX11-NEXT: v_lshlrev_b32_e32 v51, 8, v147
+; GFX11-NEXT: v_and_b32_e32 v50, 0xff, v147
+; GFX11-NEXT: v_lshlrev_b32_e32 v51, 8, v148
; GFX11-NEXT: v_and_b32_e32 v15, 0xff, v15
; GFX11-NEXT: v_lshlrev_b32_e32 v52, 8, v146
; GFX11-NEXT: v_and_b32_e32 v53, 0xff, v145
@@ -217871,7 +218587,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:76
; SI-NEXT: v_mul_f32_e32 v45, 1.0, v1
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v30
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
; SI-NEXT: v_mul_f32_e32 v3, 1.0, v3
; SI-NEXT: v_mul_f32_e32 v4, 1.0, v4
@@ -217894,410 +218610,425 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e32 v62, 1.0, v17
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_mul_f32_e32 v63, 1.0, v18
-; SI-NEXT: v_mul_f32_e32 v19, 1.0, v19
+; SI-NEXT: v_mul_f32_e32 v44, 1.0, v19
; SI-NEXT: v_mul_f32_e32 v20, 1.0, v20
-; SI-NEXT: v_mul_f32_e32 v44, 1.0, v21
+; SI-NEXT: v_mul_f32_e32 v21, 1.0, v21
; SI-NEXT: v_mul_f32_e32 v46, 1.0, v22
; SI-NEXT: v_mul_f32_e32 v47, 1.0, v23
; SI-NEXT: v_mul_f32_e32 v56, 1.0, v24
; SI-NEXT: v_mul_f32_e32 v57, 1.0, v25
; SI-NEXT: v_mul_f32_e32 v58, 1.0, v26
; SI-NEXT: v_mul_f32_e32 v14, 1.0, v27
+; SI-NEXT: v_mul_f32_e32 v18, 1.0, v28
; SI-NEXT: v_mul_f32_e32 v29, 1.0, v29
; SI-NEXT: v_mul_f32_e64 v15, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v16, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v17, 1.0, s19
-; SI-NEXT: v_mul_f32_e64 v18, 1.0, s20
-; SI-NEXT: v_mul_f32_e64 v21, 1.0, s21
-; SI-NEXT: v_mul_f32_e64 v22, 1.0, s22
-; SI-NEXT: v_mul_f32_e64 v23, 1.0, s23
-; SI-NEXT: v_mul_f32_e64 v24, 1.0, s24
-; SI-NEXT: v_mul_f32_e64 v25, 1.0, s25
-; SI-NEXT: v_mul_f32_e64 v26, 1.0, s26
-; SI-NEXT: v_mul_f32_e64 v27, 1.0, s27
-; SI-NEXT: v_mul_f32_e64 v30, 1.0, s29
+; SI-NEXT: v_mul_f32_e64 v19, 1.0, s20
+; SI-NEXT: v_mul_f32_e64 v22, 1.0, s21
+; SI-NEXT: v_mul_f32_e64 v23, 1.0, s22
+; SI-NEXT: v_mul_f32_e64 v24, 1.0, s23
+; SI-NEXT: v_mul_f32_e64 v25, 1.0, s24
+; SI-NEXT: v_mul_f32_e64 v26, 1.0, s25
+; SI-NEXT: v_mul_f32_e64 v27, 1.0, s26
+; SI-NEXT: v_mul_f32_e64 v28, 1.0, s27
+; SI-NEXT: v_mul_f32_e64 v30, 1.0, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_mul_f32_e32 v31, 1.0, v28
-; SI-NEXT: v_mul_f32_e32 v32, 1.0, v32
-; SI-NEXT: v_mul_f32_e32 v33, 1.0, v33
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v32
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mul_f32_e32 v33, 1.0, v33
; SI-NEXT: v_mul_f32_e32 v34, 1.0, v34
; SI-NEXT: v_mul_f32_e32 v35, 1.0, v35
; SI-NEXT: v_mul_f32_e32 v36, 1.0, v36
-; SI-NEXT: v_mul_f32_e32 v37, 1.0, v37
+; SI-NEXT: v_mul_f32_e32 v31, 1.0, v37
; SI-NEXT: v_mul_f32_e32 v38, 1.0, v38
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_mul_f32_e32 v39, 1.0, v39
; SI-NEXT: s_waitcnt vmcnt(11) expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v49
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v50
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v51
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v39, 1.0, v39
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v52
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v48, 1.0, v48
-; SI-NEXT: s_waitcnt vmcnt(7) expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(8) expcnt(0)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v55
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(6) expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v41
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v43
-; SI-NEXT: v_mul_f32_e32 v49, 1.0, v50
-; SI-NEXT: v_mul_f32_e32 v51, 1.0, v52
-; SI-NEXT: v_mul_f32_e32 v53, 1.0, v53
-; SI-NEXT: v_mul_f32_e32 v50, 1.0, v54
-; SI-NEXT: v_mul_f32_e32 v52, 1.0, v40
-; SI-NEXT: v_mul_f32_e32 v54, 1.0, v42
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e32 v49, 1.0, v49
+; SI-NEXT: s_waitcnt vmcnt(6) expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v42
+; SI-NEXT: v_mul_f32_e32 v50, 1.0, v51
+; SI-NEXT: v_mul_f32_e32 v52, 1.0, v53
+; SI-NEXT: v_mul_f32_e32 v54, 1.0, v54
+; SI-NEXT: v_mul_f32_e32 v55, 1.0, v40
+; SI-NEXT: v_mul_f32_e32 v53, 1.0, v41
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: v_mul_f32_e32 v40, 1.0, v43
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s16
-; SI-NEXT: v_mul_f32_e64 v28, 1.0, s28
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e64 v32, 1.0, s29
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v54, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB101_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mov_b32_e32 v42, v51
-; SI-NEXT: v_mov_b32_e32 v55, v50
-; SI-NEXT: v_mov_b32_e32 v40, v52
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v51, v49
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v15
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v16
+; SI-NEXT: v_cvt_f32_f16_e32 v49, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v41, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v18
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v21
+; SI-NEXT: v_cvt_f32_f16_e32 v43, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19
; SI-NEXT: v_cvt_f32_f16_e32 v16, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v22
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v41, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v23
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v24
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mov_b32_e32 v24, v47
-; SI-NEXT: v_mov_b32_e32 v23, v46
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v25
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_mov_b32_e32 v23, v46
+; SI-NEXT: v_mov_b32_e32 v24, v47
; SI-NEXT: v_mov_b32_e32 v25, v56
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v26
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v27
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_mov_b32_e32 v26, v57
+; SI-NEXT: v_mov_b32_e32 v27, v58
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v27
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v28
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v30
-; SI-NEXT: v_cvt_f32_f16_e32 v30, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v42, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v32
+; SI-NEXT: v_cvt_f32_f16_e32 v32, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v45
-; SI-NEXT: v_cvt_f32_f16_e32 v28, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v30, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v45, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_mov_b32_e32 v28, v14
+; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v29
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v7
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v34
+; SI-NEXT: v_mov_b32_e32 v34, v41
+; SI-NEXT: v_mov_b32_e32 v41, v55
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v37, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v9
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: v_mov_b32_e32 v36, v8
-; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v38
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v35
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v10
; SI-NEXT: v_cvt_f32_f16_e32 v9, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
+; SI-NEXT: v_mov_b32_e32 v11, v31
+; SI-NEXT: v_mov_b32_e32 v31, v18
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v12
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v13
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v51
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v33
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v55
+; SI-NEXT: v_mov_b32_e32 v36, v8
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v38
+; SI-NEXT: v_mov_b32_e32 v38, v9
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v48
+; SI-NEXT: v_mov_b32_e32 v48, v15
+; SI-NEXT: v_mov_b32_e32 v15, v49
+; SI-NEXT: v_mov_b32_e32 v49, v51
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v59
; SI-NEXT: v_cvt_f32_f16_e32 v59, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v60
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v35
-; SI-NEXT: v_mov_b32_e32 v35, v43
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v55, v13
+; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v53
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61
; SI-NEXT: v_cvt_f32_f16_e32 v60, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v62
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mov_b32_e32 v38, v10
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v39
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v63
-; SI-NEXT: v_cvt_f32_f16_e32 v61, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v19
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
-; SI-NEXT: v_mov_b32_e32 v19, v28
-; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v39, v22
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v62, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v61, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v44
; SI-NEXT: v_cvt_f32_f16_e32 v44, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v20
+; SI-NEXT: v_cvt_f32_f16_e32 v62, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v21
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v1
+; SI-NEXT: v_mov_b32_e32 v20, v30
+; SI-NEXT: v_mov_b32_e32 v30, v43
+; SI-NEXT: v_mov_b32_e32 v43, v53
+; SI-NEXT: v_mov_b32_e32 v22, v10
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v11
+; SI-NEXT: v_mov_b32_e32 v35, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v46
+; SI-NEXT: v_mov_b32_e32 v21, v2
; SI-NEXT: v_cvt_f32_f16_e32 v63, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v47
-; SI-NEXT: v_mov_b32_e32 v47, v3
-; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v49
-; SI-NEXT: v_mov_b32_e32 v49, v15
-; SI-NEXT: v_mov_b32_e32 v15, v41
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v33, v11
-; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v53
-; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v50
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v52
-; SI-NEXT: v_mov_b32_e32 v51, v53
-; SI-NEXT: v_mov_b32_e32 v53, v54
-; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v54
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v43
-; SI-NEXT: v_mov_b32_e32 v20, v2
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v57
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v37
-; SI-NEXT: v_mov_b32_e32 v37, v9
-; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v48
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v14
; SI-NEXT: v_cvt_f32_f16_e32 v46, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v56
-; SI-NEXT: v_cvt_f32_f16_e32 v56, v2
-; SI-NEXT: v_mov_b32_e32 v27, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v47, v2
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v58
-; SI-NEXT: v_cvt_f32_f16_e32 v57, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v56, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v31
-; SI-NEXT: v_cvt_f32_f16_e32 v58, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v32, v9
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f32_f16_e32 v34, v13
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f32_f16_e32 v57, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v58, v5
+; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v33
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v49
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: v_mov_b32_e32 v33, v37
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
-; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v14
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
+; SI-NEXT: v_mov_b32_e32 v37, v40
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v22
+; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v18
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-NEXT: s_waitcnt vmcnt(7) expcnt(2)
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v41
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v51
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v50
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-NEXT: s_waitcnt vmcnt(6) expcnt(1)
-; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v50
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v42, v42
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v42, v42
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v50
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v11
+; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_mov_b32_e32 v52, v12
+; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v54
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v54
; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v54
-; SI-NEXT: s_waitcnt vmcnt(4)
-; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v52
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v43
; SI-NEXT: s_cbranch_execnz .LBB101_3
; SI-NEXT: .LBB101_2: ; %cmp.true
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v54
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v37
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v53
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v43
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v40
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v41
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v50
-; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v55
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v54
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v51
-; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v42
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v41
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v50
+; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v51
+; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT: v_lshrrev_b32_e32 v51, 16, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v49
+; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v22
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v48
-; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v1
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v14
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v18
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v1
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v29
@@ -218318,7 +219049,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v25
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v24
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v14
@@ -218328,114 +219059,131 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_cvt_f32_f16_e32 v63, v63
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v62, v62
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v61, v61
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v60, v60
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v59, v59
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v38, v58
-; SI-NEXT: v_cvt_f32_f16_e32 v58, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v43
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v36, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v58, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v51
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; SI-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v38, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v57, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v42
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v37, v57
-; SI-NEXT: v_cvt_f32_f16_e32 v57, v6
+; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v33, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v56, v6
; SI-NEXT: v_cvt_f32_f16_e32 v6, v12
; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v13
; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v50
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v36, v56
-; SI-NEXT: v_cvt_f32_f16_e32 v56, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v9
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
+; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
+; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v22
; SI-NEXT: v_cvt_f32_f16_e32 v22, v23
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v24
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v22, v25
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -218444,230 +219192,208 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v22, v27
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v47
-; SI-NEXT: v_cvt_f32_f16_e32 v47, v24
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v29
+; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v45, v45
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v45, v45
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v32, v31
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v30, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v46
-; SI-NEXT: v_cvt_f32_f16_e32 v46, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v10
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v35, v55
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v55, v13
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v39, v54
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v48, v54
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v39, v17
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v41
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v14
+; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v34, v53
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v14
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v49, v16
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v14
+; SI-NEXT: v_lshrrev_b32_e32 v43, 16, v14
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v30, v43
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v14
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
-; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v14
+; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v14
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v14
; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v53
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v15
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v32, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v18
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v31
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v14, v44
; SI-NEXT: v_cvt_f32_f16_e32 v44, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v3
; SI-NEXT: v_cvt_f32_f16_e32 v3, v7
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v41
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v33
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v14, v40
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v34
-; SI-NEXT: v_cvt_f32_f16_e32 v34, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v37
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v46, v2
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v5
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v11
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v47, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v9
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v14, v28
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v52
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v49
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v33, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v51
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
-; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v14
-; SI-NEXT: .LBB101_3: ; %end
+; SI-NEXT: v_cvt_f32_f16_e32 v42, v14
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v52, v50
+; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
+; SI-NEXT: s_waitcnt vmcnt(2) expcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v42, v14
+; SI-NEXT: .LBB101_3: ; %end
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v14
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v53, 16, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v18, v14
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-NEXT: v_or_b32_e32 v14, v14, v53
+; SI-NEXT: v_or_b32_e32 v14, v14, v18
; SI-NEXT: buffer_store_dword v14, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v15
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v30
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_add_i32_e32 v15, vcc, 4, v0
; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v16
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v49
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v34
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v16
; SI-NEXT: v_add_i32_e32 v16, vcc, 12, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_add_i32_e32 v15, vcc, 8, v0
; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v18
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v39
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v17
; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v39
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v48
; SI-NEXT: v_add_i32_e32 v16, vcc, 20, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -218675,7 +219401,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v32
; SI-NEXT: v_cvt_f16_f32_e32 v15, v35
; SI-NEXT: v_add_i32_e32 v16, vcc, 24, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
@@ -218683,53 +219409,59 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v14, v45
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v20
; SI-NEXT: v_add_i32_e32 v16, vcc, 28, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v20
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v16, vcc, 32, v0
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v21
+; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v21
; SI-NEXT: v_add_i32_e32 v16, vcc, 36, v0
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v36
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v33
; SI-NEXT: v_add_i32_e32 v16, vcc, 40, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v37
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v38
; SI-NEXT: v_add_i32_e32 v16, vcc, 44, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v14, v38
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v47
+; SI-NEXT: v_cvt_f16_f32_e32 v14, v36
; SI-NEXT: v_add_i32_e32 v16, vcc, 48, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v14, v59
; SI-NEXT: v_add_i32_e32 v16, vcc, 52, v0
@@ -218738,7 +219470,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v14, v60
; SI-NEXT: v_add_i32_e32 v16, vcc, 56, v0
@@ -218747,7 +219479,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v14, v61
; SI-NEXT: v_add_i32_e32 v16, vcc, 60, v0
@@ -218756,18 +219488,16 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v14, v62
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v44
; SI-NEXT: v_add_i32_e32 v16, vcc, 64, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: buffer_store_dword v14, v16, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v14, v63
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v44
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v22
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x44, v0
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
@@ -218779,35 +219509,33 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: buffer_store_dword v1, v15, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v56
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v47
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x4c, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v14, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v57
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v56
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x50, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v58
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v57
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x54, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v58
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x58, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v6
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x5c, v0
@@ -218816,7 +219544,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v7
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x60, v0
@@ -218825,7 +219553,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v8
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x64, v0
@@ -218834,11 +219562,13 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v32
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x68, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
@@ -218852,7 +219582,7 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v33
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v52
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x70, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
@@ -218868,14 +219598,14 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v34
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v55
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x78, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: buffer_store_dword v1, v3, s[0:3], 0 offen
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v43
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v42
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -218901,22 +219631,17 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB101_4:
-; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v53, v54
-; SI-NEXT: v_mov_b32_e32 v40, v52
-; SI-NEXT: v_mov_b32_e32 v55, v50
-; SI-NEXT: v_mov_b32_e32 v42, v51
+; SI-NEXT: v_mov_b32_e32 v43, v53
; SI-NEXT: v_mov_b32_e32 v28, v14
+; SI-NEXT: v_mov_b32_e32 v31, v18
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
@@ -218940,6 +219665,8 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: v_mov_b32_e32 v37, v40
+; SI-NEXT: v_mov_b32_e32 v41, v55
; SI-NEXT: v_mov_b32_e32 v27, v58
; SI-NEXT: v_mov_b32_e32 v26, v57
; SI-NEXT: v_mov_b32_e32 v25, v56
@@ -218947,45 +219674,46 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: v_mov_b32_e32 v23, v46
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr8
+; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr12
-; SI-NEXT: ; implicit-def: $vgpr43
+; SI-NEXT: ; implicit-def: $vgpr42
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: ; implicit-def: $vgpr49
+; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr16
-; SI-NEXT: ; implicit-def: $vgpr18
+; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr39
-; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr45
-; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr17
+; SI-NEXT: ; implicit-def: $vgpr48
+; SI-NEXT: ; implicit-def: $vgpr35
+; SI-NEXT: ; implicit-def: $vgpr32
+; SI-NEXT: ; implicit-def: $vgpr20
+; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; implicit-def: $vgpr37
-; SI-NEXT: ; implicit-def: $vgpr47
+; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: ; implicit-def: $vgpr38
+; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $vgpr59
; SI-NEXT: ; implicit-def: $vgpr60
-; SI-NEXT: ; implicit-def: $vgpr61
; SI-NEXT: ; kill: killed $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr62
+; SI-NEXT: ; implicit-def: $vgpr61
; SI-NEXT: ; implicit-def: $vgpr44
+; SI-NEXT: ; implicit-def: $vgpr62
+; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr63
; SI-NEXT: ; implicit-def: $vgpr46
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr56
+; SI-NEXT: ; implicit-def: $vgpr47
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr57
+; SI-NEXT: ; implicit-def: $vgpr56
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr58
+; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr6
; SI-NEXT: ; implicit-def: $vgpr6
@@ -218993,19 +219721,21 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; kill: killed $vgpr8
; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr32
+; SI-NEXT: ; kill: killed $vgpr9
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; kill: killed $vgpr10
; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: ; implicit-def: $vgpr33
+; SI-NEXT: ; implicit-def: $vgpr52
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: ; kill: killed $vgpr12
; SI-NEXT: ; implicit-def: $vgpr12
-; SI-NEXT: ; implicit-def: $vgpr34
+; SI-NEXT: ; implicit-def: $vgpr55
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; kill: killed $vgpr43
-; SI-NEXT: ; implicit-def: $vgpr43
-; SI-NEXT: s_branch .LBB101_2
+; SI-NEXT: ; kill: killed $vgpr42
+; SI-NEXT: ; implicit-def: $vgpr42
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB101_2
+; SI-NEXT: s_branch .LBB101_3
;
; VI-LABEL: bitcast_v64bf16_to_v64f16_scalar:
; VI: ; %bb.0:
@@ -219013,8 +219743,9 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; VI-NEXT: s_or_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v42, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: v_writelane_b32 v42, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v42, s31, 1
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
@@ -219033,14 +219764,17 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
-; VI-NEXT: .LBB101_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB101_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB101_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s30, 16
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -219619,8 +220353,6 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; VI-NEXT: v_alignbit_b32 v17, v17, v33, 16
; VI-NEXT: v_alignbit_b32 v16, v16, v18, 16
; VI-NEXT: s_branch .LBB101_5
-; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -219656,8 +220388,9 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v43, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: v_writelane_b32 v43, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v43, s31, 1
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
@@ -219676,15 +220409,18 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
-; GFX9-NEXT: .LBB101_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB101_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: s_and_b32 s4, s30, 0xffff0000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -220296,8 +221032,6 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX9-NEXT: v_lshl_or_b32 v17, v34, 16, v17
; GFX9-NEXT: v_lshl_or_b32 v16, v18, 16, v16
; GFX9-NEXT: s_branch .LBB101_5
-; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -220340,17 +221074,20 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX11-NEXT: s_mov_b32 s15, s3
; GFX11-NEXT: s_mov_b32 s14, s2
; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB101_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
-; GFX11-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v16
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
@@ -221010,8 +221747,6 @@ define inreg <64 x half> @bitcast_v64bf16_to_v64f16_scalar(<64 x bfloat> inreg %
; GFX11-NEXT: v_lshl_or_b32 v18, v33, 16, v37
; GFX11-NEXT: v_lshl_or_b32 v17, v17, 16, v38
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -222613,54 +223348,54 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:72
; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:76
; SI-NEXT: v_cvt_f16_f32_e32 v40, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v6
; SI-NEXT: v_mov_b32_e32 v46, v26
; SI-NEXT: v_cvt_f16_f32_e32 v43, v2
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v44, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
-; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_cvt_f16_f32_e32 v26, v25
; SI-NEXT: v_cvt_f16_f32_e32 v46, v46
; SI-NEXT: v_cvt_f16_f32_e32 v47, v27
; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_cvt_f16_f32_e32 v56, v29
; SI-NEXT: v_cvt_f16_f32_e32 v57, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v8, s16
-; SI-NEXT: v_cvt_f16_f32_e32 v24, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v7, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v23, s18
; SI-NEXT: v_cvt_f16_f32_e32 v25, s19
; SI-NEXT: v_cvt_f16_f32_e32 v29, s20
; SI-NEXT: v_cvt_f16_f32_e32 v30, s21
; SI-NEXT: v_cvt_f16_f32_e32 v27, s24
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v16
-; SI-NEXT: v_cvt_f16_f32_e32 v16, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v22
; SI-NEXT: v_cvt_f16_f32_e32 v32, v32
; SI-NEXT: v_cvt_f16_f32_e32 v33, v33
; SI-NEXT: v_cvt_f16_f32_e32 v34, v34
; SI-NEXT: v_cvt_f16_f32_e32 v58, v35
; SI-NEXT: v_cvt_f16_f32_e32 v36, v36
; SI-NEXT: v_cvt_f16_f32_e32 v59, v37
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v38
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v38
; SI-NEXT: s_waitcnt vmcnt(13)
-; SI-NEXT: v_cvt_f16_f32_e32 v39, v39
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v39
; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_cvt_f16_f32_e32 v61, v48
; SI-NEXT: s_waitcnt vmcnt(11) expcnt(0)
@@ -222684,448 +223419,446 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f16_f32_e32 v2, v50
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v44, v51
-; SI-NEXT: v_cvt_f16_f32_e32 v23, s17
-; SI-NEXT: v_cvt_f16_f32_e32 v38, s22
-; SI-NEXT: v_cvt_f16_f32_e32 v37, s23
+; SI-NEXT: v_cvt_f16_f32_e32 v6, v51
+; SI-NEXT: v_cvt_f16_f32_e32 v22, s17
+; SI-NEXT: v_cvt_f16_f32_e32 v37, s22
+; SI-NEXT: v_cvt_f16_f32_e32 v39, s23
; SI-NEXT: v_cvt_f16_f32_e32 v48, s25
; SI-NEXT: v_cvt_f16_f32_e32 v49, s26
; SI-NEXT: v_cvt_f16_f32_e32 v35, s27
; SI-NEXT: v_cvt_f16_f32_e32 v50, s28
; SI-NEXT: v_cvt_f16_f32_e32 v51, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v38, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB103_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v23
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v22
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v24
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v23
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v25
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v25
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v29
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v29
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v30
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v30
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v38
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v37
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v37
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v39
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v27
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v27
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v48
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v48
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v49
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v49
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v35
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v35
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v50
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v50
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v51
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v51
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v40
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v40
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v43
-; SI-NEXT: v_mov_b32_e32 v43, v6
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v43
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v43
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v7
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v20
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v21
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v28
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v46
+; SI-NEXT: v_mov_b32_e32 v48, v46
+; SI-NEXT: v_mov_b32_e32 v49, v47
; SI-NEXT: s_mov_b64 s[4:5], 0
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_mov_b32_e32 v50, v19
-; SI-NEXT: v_mov_b32_e32 v51, v22
-; SI-NEXT: v_mov_b32_e32 v38, v16
-; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
+; SI-NEXT: v_mov_b32_e32 v50, v21
; SI-NEXT: v_mov_b32_e32 v37, v45
+; SI-NEXT: v_mov_b32_e32 v39, v24
; SI-NEXT: v_mov_b32_e32 v27, v26
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v26
-; SI-NEXT: v_mov_b32_e32 v49, v47
+; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_mov_b32_e32 v35, v28
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v58
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v59
-; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v60
-; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v39
-; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v1
+; SI-NEXT: v_mov_b32_e32 v51, v57
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v59
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v60
+; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v52
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v53
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v54
-; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v55
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v41
-; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v62
-; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v6
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v2
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v43
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v44
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v8
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v9
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v10
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v11
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v12
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v13
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v14
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v31
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v17
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v16
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v18
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v19
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v46
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v45
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v47
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v24
+; SI-NEXT: v_mov_b32_e32 v46, v5
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v47
+; SI-NEXT: v_mov_b32_e32 v47, v7
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v28
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v56
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v57
+; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v21
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v57, v5
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v32
-; SI-NEXT: v_mov_b32_e32 v32, v7
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32
+; SI-NEXT: v_mov_b32_e32 v32, v8
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v33
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v34
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v15
-; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v22
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v45
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
-; SI-NEXT: v_mov_b32_e32 v33, v12
-; SI-NEXT: v_mov_b32_e32 v34, v5
-; SI-NEXT: v_mov_b32_e32 v58, v7
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v36
-; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v61
-; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v42
-; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v63
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v2
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v44
-; SI-NEXT: v_mov_b32_e32 v44, v18
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v58
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v61
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v53
+; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v55
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v42
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v62
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v6
; SI-NEXT: v_mov_b32_e32 v5, v43
-; SI-NEXT: v_mov_b32_e32 v18, v6
+; SI-NEXT: v_mov_b32_e32 v43, v19
; SI-NEXT: s_branch .LBB103_3
; SI-NEXT: .LBB103_2:
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_mov_b32_e32 v51, v57
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: v_mov_b32_e32 v35, v28
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: v_mov_b32_e32 v49, v47
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: v_mov_b32_e32 v48, v46
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: v_mov_b32_e32 v35, v28
-; SI-NEXT: v_mov_b32_e32 v49, v47
; SI-NEXT: v_mov_b32_e32 v27, v26
-; SI-NEXT: v_mov_b32_e32 v37, v45
-; SI-NEXT: v_mov_b32_e32 v38, v16
-; SI-NEXT: v_mov_b32_e32 v51, v22
-; SI-NEXT: v_mov_b32_e32 v50, v19
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: v_mov_b32_e32 v39, v24
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr4
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: v_mov_b32_e32 v37, v45
+; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: v_mov_b32_e32 v5, v6
+; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: v_mov_b32_e32 v50, v21
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr11
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
; SI-NEXT: ; implicit-def: $vgpr17
+; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; implicit-def: $vgpr16
-; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr57
+; SI-NEXT: ; implicit-def: $vgpr14
+; SI-NEXT: ; implicit-def: $vgpr46
+; SI-NEXT: ; implicit-def: $vgpr26
+; SI-NEXT: ; implicit-def: $vgpr47
; SI-NEXT: ; implicit-def: $vgpr32
-; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: ; implicit-def: $vgpr34
-; SI-NEXT: ; implicit-def: $vgpr44
-; SI-NEXT: ; implicit-def: $vgpr58
-; SI-NEXT: ; implicit-def: $vgpr20
+; SI-NEXT: ; implicit-def: $vgpr43
+; SI-NEXT: ; implicit-def: $vgpr57
+; SI-NEXT: ; implicit-def: $vgpr13
+; SI-NEXT: ; implicit-def: $vgpr18
+; SI-NEXT: ; implicit-def: $vgpr12
+; SI-NEXT: ; implicit-def: $vgpr15
+; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr22
+; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr40
; SI-NEXT: ; implicit-def: $vgpr24
-; SI-NEXT: ; implicit-def: $vgpr45
-; SI-NEXT: ; implicit-def: $vgpr48
+; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: ; implicit-def: $vgpr21
+; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: ; implicit-def: $vgpr47
-; SI-NEXT: ; implicit-def: $vgpr26
-; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr14
+; SI-NEXT: ; implicit-def: $vgpr10
+; SI-NEXT: ; implicit-def: $vgpr21
+; SI-NEXT: ; implicit-def: $vgpr11
+; SI-NEXT: ; implicit-def: $vgpr40
+; SI-NEXT: ; implicit-def: $vgpr33
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr3
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: ; kill: killed $vgpr3
+; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: .LBB103_3: ; %Flow
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_mov_b32_e32 v36, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v43, v9
-; SI-NEXT: v_mov_b32_e32 v12, v31
+; SI-NEXT: v_mov_b32_e32 v61, v2
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v44, v9
+; SI-NEXT: v_mov_b32_e32 v19, v20
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; SI-NEXT: v_mov_b32_e32 v31, v11
-; SI-NEXT: v_mov_b32_e32 v9, v17
+; SI-NEXT: v_mov_b32_e32 v9, v16
; SI-NEXT: s_cbranch_vccnz .LBB103_5
; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v36
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v63
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v62
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v7
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v8
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v10
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v14
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v55
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v54
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v42
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v14
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v1
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v15
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
-; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v41
-; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v15
-; SI-NEXT: v_mov_b32_e32 v6, v37
-; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v39
-; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v52
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v13
+; SI-NEXT: v_add_f32_e32 v54, 0x38000000, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v41
+; SI-NEXT: v_mov_b32_e32 v6, v39
+; SI-NEXT: v_mov_b32_e32 v20, v49
+; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v7
+; SI-NEXT: v_add_f32_e32 v55, 0x38000000, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v50
+; SI-NEXT: v_cvt_f32_f16_e32 v42, v44
+; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v38
+; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v51
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v58
; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v51
-; SI-NEXT: v_add_f32_e32 v37, 0x38000000, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v60
-; SI-NEXT: v_add_f32_e32 v48, 0x38000000, v14
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v59
-; SI-NEXT: v_cvt_f32_f16_e32 v28, v50
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v50, v13
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v14
-; SI-NEXT: v_cvt_f32_f16_e32 v33, v12
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v19
; SI-NEXT: v_cvt_f32_f16_e32 v45, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v42, v43
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v18
-; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v56
+; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v59
; SI-NEXT: v_add_f32_e32 v45, 0x38000000, v45
; SI-NEXT: v_add_f32_e32 v42, 0x38000000, v42
-; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43
-; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33
-; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:440 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v27
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v49
+; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v10
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
+; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v48
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v15
+; SI-NEXT: v_add_f32_e32 v49, 0x38000000, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v36
+; SI-NEXT: v_add_f32_e32 v38, 0x38000000, v13
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v56
-; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v27
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v7
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v3
+; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v35
+; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v37
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v8, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v52, 0x38000000, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v35
-; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v38
-; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v39, 0x38000000, v10
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v46
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v20
+; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v8
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v8
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v21, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT: v_cvt_f32_f16_e32 v28, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v32, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v32, 0x38000000, v32
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v33, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v33, 0x38000000, v33
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v34, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v34, 0x38000000, v34
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v36, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v36, 0x38000000, v36
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v50, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v50, 0x38000000, v50
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v51, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v51, 0x38000000, v51
@@ -223138,85 +223871,89 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v41, 0x38000000, v41
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v44, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v43, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v43, 0x38000000, v43
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v44, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v44, 0x38000000, v44
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v46, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v46, 0x38000000, v46
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v47, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v47, 0x38000000, v47
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v56, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v56, 0x38000000, v56
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v57, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v57, 0x38000000, v57
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v58, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v58, 0x38000000, v58
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v26, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v19, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v35, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v35, 0x38000000, v35
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v12, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v59, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v59, 0x38000000, v59
; SI-NEXT: v_cvt_f16_f32_e32 v59, v59
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v60, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v60, 0x38000000, v60
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v61, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v61, 0x38000000, v61
; SI-NEXT: v_cvt_f16_f32_e32 v61, v61
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v62, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v62, 0x38000000, v62
; SI-NEXT: v_cvt_f16_f32_e32 v62, v62
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v63, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v63, 0x38000000, v63
; SI-NEXT: v_cvt_f16_f32_e32 v63, v63
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -223245,160 +223982,156 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v31
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v35
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v15
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v35
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v19
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v15
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v24
; SI-NEXT: v_cvt_f16_f32_e32 v5, v26
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v58
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v57
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v58
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v57
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v15
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v56
; SI-NEXT: v_cvt_f16_f32_e32 v5, v47
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v45
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v46
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v45
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v46
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v44
; SI-NEXT: v_cvt_f16_f32_e32 v5, v43
-; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v42
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v41
+; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v42
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v41
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v40
; SI-NEXT: v_cvt_f16_f32_e32 v5, v51
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v50
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v36
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v36
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v34
; SI-NEXT: v_cvt_f16_f32_e32 v5, v33
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v32
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v31
-; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v15, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v32
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v11
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v12
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v15
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v6
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v2
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v21
+; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v10
+; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v17
+; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v16
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v6
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v16
-; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v1
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v20
+; SI-NEXT: v_lshlrev_b32_e32 v43, 16, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v18
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
+; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v5
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v20
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v17
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v10
-; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v39
-; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v52
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v13
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v23
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v1, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v15
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v48
-; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v22
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v38
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v52
+; SI-NEXT: v_mov_b32_e32 v17, v12
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v49
+; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v25
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:440 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v48, 16, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v24
-; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v14
-; SI-NEXT: v_mov_b32_e32 v16, v6
-; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v37
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v25
+; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v39
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v29
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v55
-; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v30
; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v5
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v54
-; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v5
-; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v5
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_cvt_f16_f32_e32 v3, v2
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
@@ -223408,19 +224141,20 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v4
-; SI-NEXT: v_mov_b32_e32 v4, v27
+; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v4
+; SI-NEXT: v_mov_b32_e32 v4, v26
+; SI-NEXT: v_mov_b32_e32 v26, v6
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f16_f32_e32 v3, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v3
-; SI-NEXT: v_mov_b32_e32 v3, v13
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v3
+; SI-NEXT: v_mov_b32_e32 v3, v19
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v1
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v2
+; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v2
; SI-NEXT: .LBB103_5: ; %end
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
@@ -223509,7 +224243,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 28, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -223518,7 +224252,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 32, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v4
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
@@ -223527,8 +224261,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 36, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -223538,8 +224272,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 40, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -223549,8 +224283,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -223560,8 +224294,8 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
@@ -223571,112 +224305,110 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 52, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 56, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v17
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v31
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v9
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v14
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v9
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v19
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v46
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v16
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v2
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v8
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v47
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v26
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v32
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v43
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v57
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v32
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v33
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v57
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v10
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v44
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v18
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v34
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v13
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v20
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v15
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v58
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v12
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v23
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v22
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v22
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v7
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v24
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v23
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v40
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v45
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v48
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v60
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v45
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v24
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v21
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v8
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v25
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -223690,23 +224422,23 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v47
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v10
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v30
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v15
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v11
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v26
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v21
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v1, 1.0, v14
+; SI-NEXT: v_mul_f32_e32 v1, 1.0, v33
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v7
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v40
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
@@ -223733,6 +224465,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -223753,7 +224486,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -223766,10 +224499,13 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB103_4
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_3
-; VI-NEXT: .LBB103_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB103_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB103_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v18, 0x200
; VI-NEXT: v_add_f16_e32 v33, 0x200, v15
; VI-NEXT: v_add_f16_sdwa v15, v15, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
@@ -223867,16 +224603,15 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; VI-NEXT: v_add_f16_e32 v16, 0x200, v16
; VI-NEXT: v_or_b32_e32 v17, v33, v17
; VI-NEXT: v_or_b32_e32 v16, v16, v18
-; VI-NEXT: .LBB103_3: ; %end
+; VI-NEXT: .LBB103_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB103_4:
-; VI-NEXT: s_branch .LBB103_2
;
; GFX9-LABEL: bitcast_v64f16_to_v64bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -223897,7 +224632,7 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -223910,10 +224645,13 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_3
-; GFX9-NEXT: .LBB103_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB103_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -223947,11 +224685,9 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX9-NEXT: v_pk_add_f16 v32, v32, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, v17, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, v16, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB103_3: ; %end
+; GFX9-NEXT: .LBB103_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB103_4:
-; GFX9-NEXT: s_branch .LBB103_2
;
; GFX11-LABEL: bitcast_v64f16_to_v64bf16_scalar:
; GFX11: ; %bb.0:
@@ -223965,17 +224701,20 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX11-NEXT: s_mov_b32 s15, s3
; GFX11-NEXT: s_mov_b32 s14, s2
; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB103_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB103_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -224009,8 +224748,6 @@ define inreg <64 x bfloat> @bitcast_v64f16_to_v64bf16_scalar(<64 x half> inreg %
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
; GFX11-NEXT: .LBB103_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -227567,279 +228304,301 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v3
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v4
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v6
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v2, 1.0, v7
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v12
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v61, 1.0, v4
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v10
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v11
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v15
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; SI-NEXT: v_mul_f32_e32 v45, 1.0, v5
; SI-NEXT: v_mul_f32_e32 v5, 1.0, v8
; SI-NEXT: v_mul_f32_e32 v8, 1.0, v9
-; SI-NEXT: v_mul_f32_e32 v6, 1.0, v10
-; SI-NEXT: v_mul_f32_e32 v62, 1.0, v11
+; SI-NEXT: v_mul_f32_e32 v62, 1.0, v12
; SI-NEXT: v_mul_f32_e32 v46, 1.0, v13
-; SI-NEXT: v_mul_f32_e32 v13, 1.0, v14
-; SI-NEXT: v_mul_f32_e32 v60, 1.0, v16
-; SI-NEXT: v_mul_f32_e32 v57, 1.0, v17
+; SI-NEXT: v_mul_f32_e32 v10, 1.0, v14
+; SI-NEXT: v_mul_f32_e32 v61, 1.0, v16
+; SI-NEXT: v_mul_f32_e32 v15, 1.0, v17
; SI-NEXT: v_mul_f32_e32 v16, 1.0, v18
-; SI-NEXT: v_mul_f32_e32 v56, 1.0, v19
-; SI-NEXT: v_mul_f32_e32 v47, 1.0, v20
+; SI-NEXT: v_mul_f32_e32 v60, 1.0, v19
+; SI-NEXT: v_mul_f32_e32 v56, 1.0, v20
; SI-NEXT: v_mul_f32_e32 v17, 1.0, v21
; SI-NEXT: v_mul_f32_e32 v19, 1.0, v22
-; SI-NEXT: v_mul_f32_e32 v18, 1.0, v23
+; SI-NEXT: v_mul_f32_e32 v47, 1.0, v23
; SI-NEXT: v_mul_f32_e32 v20, 1.0, v24
-; SI-NEXT: v_mul_f32_e32 v21, 1.0, v25
+; SI-NEXT: v_mul_f32_e32 v23, 1.0, v25
; SI-NEXT: v_mul_f32_e32 v22, 1.0, v26
; SI-NEXT: v_mul_f32_e32 v63, 1.0, v27
; SI-NEXT: v_mul_f32_e32 v58, 1.0, v28
-; SI-NEXT: v_mul_f32_e32 v26, 1.0, v29
-; SI-NEXT: v_mul_f32_e32 v23, 1.0, v30
-; SI-NEXT: v_mul_f32_e64 v7, 1.0, s16
+; SI-NEXT: v_mul_f32_e32 v27, 1.0, v29
+; SI-NEXT: v_mul_f32_e32 v26, 1.0, v30
+; SI-NEXT: v_mul_f32_e64 v24, 1.0, s16
+; SI-NEXT: v_mul_f32_e64 v21, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s23
-; SI-NEXT: v_mul_f32_e64 v29, 1.0, s25
+; SI-NEXT: v_mul_f32_e64 v18, 1.0, s26
; SI-NEXT: v_mul_f32_e64 v14, 1.0, s27
-; SI-NEXT: v_mul_f32_e64 v25, 1.0, s28
-; SI-NEXT: v_mul_f32_e64 v24, 1.0, s29
+; SI-NEXT: v_mul_f32_e64 v29, 1.0, s28
+; SI-NEXT: v_mul_f32_e64 v25, 1.0, s29
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_mul_f32_e32 v31, 1.0, v15
-; SI-NEXT: v_mul_f32_e32 v10, 1.0, v32
-; SI-NEXT: v_mul_f32_e32 v12, 1.0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v36
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v28, 1.0, v34
-; SI-NEXT: v_mul_f32_e32 v27, 1.0, v35
-; SI-NEXT: v_mul_f32_e32 v59, 1.0, v37
-; SI-NEXT: v_mul_f32_e32 v35, 1.0, v38
-; SI-NEXT: v_mul_f32_e32 v30, 1.0, v39
-; SI-NEXT: v_mul_f32_e32 v39, 1.0, v48
+; SI-NEXT: v_mul_f32_e32 v57, 1.0, v32
+; SI-NEXT: v_mul_f32_e32 v13, 1.0, v33
+; SI-NEXT: v_mul_f32_e32 v30, 1.0, v34
+; SI-NEXT: v_mul_f32_e32 v28, 1.0, v35
+; SI-NEXT: v_mul_f32_e32 v32, 1.0, v36
+; SI-NEXT: v_mul_f32_e32 v12, 1.0, v37
+; SI-NEXT: v_mul_f32_e32 v36, 1.0, v38
+; SI-NEXT: v_mul_f32_e32 v34, 1.0, v39
+; SI-NEXT: v_mul_f32_e32 v59, 1.0, v48
; SI-NEXT: v_mul_f32_e32 v4, 1.0, v49
-; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_mul_f32_e32 v48, 1.0, v50
-; SI-NEXT: s_waitcnt vmcnt(10) expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v54
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e32 v36, 1.0, v51
+; SI-NEXT: v_mul_f32_e32 v39, 1.0, v51
; SI-NEXT: v_mul_f32_e32 v37, 1.0, v52
+; SI-NEXT: s_waitcnt vmcnt(12) expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v54
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(12) expcnt(0)
+; SI-NEXT: v_mul_f32_e32 v2, 1.0, v55
; SI-NEXT: v_mul_f32_e32 v38, 1.0, v53
-; SI-NEXT: s_waitcnt vmcnt(6) expcnt(0)
-; SI-NEXT: v_mul_f32_e32 v2, 1.0, v43
-; SI-NEXT: v_mul_f32_e32 v49, 1.0, v55
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_mul_f32_e32 v33, 1.0, v40
-; SI-NEXT: v_mul_f32_e32 v34, 1.0, v41
-; SI-NEXT: v_mul_f32_e32 v32, 1.0, v42
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: v_mul_f32_e64 v53, 1.0, s17
+; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_mul_f32_e32 v35, 1.0, v41
+; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: v_mul_f32_e32 v7, 1.0, v42
+; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: v_mul_f32_e32 v6, 1.0, v43
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s19
-; SI-NEXT: v_mul_f32_e64 v52, 1.0, s20
-; SI-NEXT: v_mul_f32_e64 v51, 1.0, s21
-; SI-NEXT: v_mul_f32_e64 v50, 1.0, s24
-; SI-NEXT: v_mul_f32_e64 v15, 1.0, s26
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
+; SI-NEXT: v_mul_f32_e64 v51, 1.0, s20
+; SI-NEXT: v_mul_f32_e64 v50, 1.0, s21
+; SI-NEXT: v_mul_f32_e64 v49, 1.0, s24
+; SI-NEXT: v_mul_f32_e64 v31, 1.0, s25
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:312 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:316 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:320 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:324 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v26, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v32, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v52, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v53, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:356 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v34, off, s[0:3], s32 offset:360 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:364 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:368 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:372 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v48, off, s[0:3], s32 offset:380 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:384 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v33, off, s[0:3], s32 offset:388 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:392 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:396 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:400 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:404 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v25, off, s[0:3], s32 offset:408 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:412 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v50, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v51, off, s[0:3], s32 offset:428 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:432 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:436 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB105_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v52
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v51
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v50
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v29
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v15
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v44
-; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v7
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v53
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v49
+; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v10
+; SI-NEXT: v_mov_b32_e32 v10, v32
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v21
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: v_mov_b32_e32 v50, v5
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v14
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v31
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v24
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v18
+; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v14
+; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v29
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v7, v5
-; SI-NEXT: v_mov_b32_e32 v42, v62
-; SI-NEXT: v_mov_b32_e32 v43, v63
-; SI-NEXT: v_mov_b32_e32 v55, v12
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v25
-; SI-NEXT: v_mov_b32_e32 v25, v60
-; SI-NEXT: v_mov_b32_e32 v54, v47
-; SI-NEXT: v_mov_b32_e32 v40, v20
-; SI-NEXT: v_mov_b32_e32 v51, v61
+; SI-NEXT: v_mov_b32_e32 v42, v62
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_mov_b32_e32 v29, v60
+; SI-NEXT: v_mov_b32_e32 v25, v56
+; SI-NEXT: v_mov_b32_e32 v52, v13
+; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v24
; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v44
+; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v45
; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v46
-; SI-NEXT: v_mov_b32_e32 v29, v31
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v31
-; SI-NEXT: v_mov_b32_e32 v24, v56
-; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_mov_b32_e32 v52, v10
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v24, v47
+; SI-NEXT: v_mov_b32_e32 v54, v20
+; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v23
+; SI-NEXT: v_mov_b32_e32 v40, v63
+; SI-NEXT: v_mov_b32_e32 v43, v57
+; SI-NEXT: v_mov_b32_e32 v55, v12
; SI-NEXT: v_mov_b32_e32 v53, v59
-; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v30
-; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v49
-; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v61
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v45
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v32
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v21, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v13
-; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v57
-; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v26
-; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v4
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v50, v1
+; SI-NEXT: v_mov_b32_e32 v51, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v5
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v8
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v6
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v8
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v63
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v62
-; SI-NEXT: v_mov_b32_e32 v62, v5
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v63
-; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v12
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v57
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v10
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v10
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v27
-; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v59
-; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v36
-; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v32
-; SI-NEXT: s_waitcnt vmcnt(5) expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v12
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v59
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v39
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_mov_b32_e32 v41, v1
-; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v1
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v60
-; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v16
-; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v47
-; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v19
-; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v20
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v33
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v34
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v58
+; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v26
+; SI-NEXT: v_lshrrev_b32_e32 v57, 16, v30
+; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v28
+; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v12
+; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v36
+; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v39
+; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v38
+; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v56
-; SI-NEXT: v_mov_b32_e32 v39, v4
-; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v37
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v8
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_mov_b32_e32 v31, v1
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v17
-; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v21
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v62
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v61
+; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v15
+; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v16
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v60
+; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v56
+; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v17
+; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v19
+; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v13
+; SI-NEXT: v_mov_b32_e32 v13, v49
+; SI-NEXT: v_mov_b32_e32 v49, v4
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v4
+; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v37
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v47
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v22
-; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v58
-; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v23
-; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v28
-; SI-NEXT: v_lshrrev_b32_e32 v21, 16, v35
-; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v48
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v27
+; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v34
+; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v48
; SI-NEXT: v_mov_b32_e32 v37, v38
-; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v38
-; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v8
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v34
+; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v33
+; SI-NEXT: v_mov_b32_e32 v33, v35
+; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v35
+; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v6
+; SI-NEXT: v_mov_b32_e32 v6, v5
+; SI-NEXT: v_mov_b32_e32 v5, v62
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v4
+; SI-NEXT: v_mov_b32_e32 v62, v4
; SI-NEXT: s_branch .LBB105_3
; SI-NEXT: .LBB105_2:
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v55, v12
-; SI-NEXT: v_mov_b32_e32 v33, v34
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: ; implicit-def: $vgpr8
+; SI-NEXT: ; kill: killed $vgpr8
+; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
+; SI-NEXT: ; implicit-def: $vgpr8
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v42, v62
+; SI-NEXT: ; kill: killed $vgpr8
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v10, v32
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
@@ -227871,112 +228630,103 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v7, v5
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: v_mov_b32_e32 v51, v61
-; SI-NEXT: v_mov_b32_e32 v42, v62
-; SI-NEXT: v_mov_b32_e32 v29, v31
-; SI-NEXT: v_mov_b32_e32 v25, v60
-; SI-NEXT: v_mov_b32_e32 v24, v56
-; SI-NEXT: v_mov_b32_e32 v54, v47
-; SI-NEXT: v_mov_b32_e32 v40, v20
-; SI-NEXT: v_mov_b32_e32 v43, v63
-; SI-NEXT: v_mov_b32_e32 v52, v10
+; SI-NEXT: v_mov_b32_e32 v49, v4
+; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr4
+; SI-NEXT: v_mov_b32_e32 v50, v5
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_mov_b32_e32 v29, v60
+; SI-NEXT: v_mov_b32_e32 v25, v56
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_mov_b32_e32 v24, v47
+; SI-NEXT: v_mov_b32_e32 v54, v20
+; SI-NEXT: v_mov_b32_e32 v40, v63
+; SI-NEXT: v_mov_b32_e32 v43, v57
+; SI-NEXT: v_mov_b32_e32 v52, v13
+; SI-NEXT: v_mov_b32_e32 v55, v12
; SI-NEXT: v_mov_b32_e32 v53, v59
-; SI-NEXT: v_mov_b32_e32 v39, v4
; SI-NEXT: v_mov_b32_e32 v37, v38
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: v_mov_b32_e32 v33, v35
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: ; implicit-def: $vgpr15
+; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; implicit-def: $vgpr45
-; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr46
-; SI-NEXT: ; implicit-def: $vgpr61
-; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr60
-; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr14
+; SI-NEXT: ; implicit-def: $vgpr5
+; SI-NEXT: ; implicit-def: $vgpr61
+; SI-NEXT: ; implicit-def: $vgpr15
; SI-NEXT: ; implicit-def: $vgpr16
+; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr56
-; SI-NEXT: ; implicit-def: $vgpr47
-; SI-NEXT: ; implicit-def: $vgpr18
-; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr17
+; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr20
+; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr63
; SI-NEXT: ; implicit-def: $vgpr22
-; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr58
-; SI-NEXT: ; implicit-def: $vgpr63
-; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: ; kill: killed $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: ; implicit-def: $vgpr21
+; SI-NEXT: ; implicit-def: $vgpr19
+; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr28
+; SI-NEXT: ; implicit-def: $vgpr47
+; SI-NEXT: ; kill: killed $vgpr4
+; SI-NEXT: ; implicit-def: $vgpr17
+; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr59
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr38
+; SI-NEXT: ; implicit-def: $vgpr12
+; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr48
-; SI-NEXT: ; implicit-def: $vgpr49
-; SI-NEXT: ; implicit-def: $vgpr20
-; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: ; implicit-def: $vgpr38
; SI-NEXT: ; implicit-def: $vgpr35
+; SI-NEXT: ; implicit-def: $vgpr36
+; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: .LBB105_3: ; %Flow
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_cbranch_vccnz .LBB105_5
; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:436 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:432 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v40
+; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v54
; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19
; SI-NEXT: v_lshrrev_b32_e32 v20, 16, v19
-; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v55
-; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v39
+; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v52
+; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v49
; SI-NEXT: v_and_b32_e32 v28, 0xffff0000, v37
; SI-NEXT: v_and_b32_e32 v30, 0xffff0000, v33
-; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v30
-; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v32
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v34
-; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v33
; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_f32_e32 v4, 0x40c00000, v2
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:428 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:424 ; 4-byte Folded Reload
@@ -227989,7 +228739,7 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v5
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
@@ -228002,7 +228752,7 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v6
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:412 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:408 ; 4-byte Folded Reload
@@ -228015,317 +228765,319 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; SI-NEXT: v_alignbit_b32 v1, v3, v1, 16
-; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v51
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v31
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v7
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v21
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v3
; SI-NEXT: v_alignbit_b32 v1, v9, v1, 16
-; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v7
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v50
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v50
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v51
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v9
; SI-NEXT: v_alignbit_b32 v1, v11, v1, 16
-; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v41
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v42
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v42
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v31
; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v14, 16, v11
; SI-NEXT: v_alignbit_b32 v1, v14, v1, 16
-; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v25
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v41
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v29
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v32
; SI-NEXT: v_add_f32_e32 v14, 0x40c00000, v14
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v16, 16, v14
; SI-NEXT: v_alignbit_b32 v51, v16, v1, 16
-; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v54
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24
+; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v25
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v29
; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v16
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v16
; SI-NEXT: v_alignbit_b32 v1, v18, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:396 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v24
+; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT: v_alignbit_b32 v1, v20, v1, 16
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v40
+; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v23
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v24
-; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v53
+; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v55
; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v25
; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v27
; SI-NEXT: v_lshrrev_b32_e32 v27, 16, v29
; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v28
; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v31
+; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v30
+; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v32
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v38, 0xffff0000, v2
-; SI-NEXT: v_and_b32_e32 v34, 0xffff0000, v9
+; SI-NEXT: v_and_b32_e32 v35, 0xffff0000, v9
; SI-NEXT: v_and_b32_e32 v37, 0xffff0000, v3
; SI-NEXT: v_and_b32_e32 v16, 0xffff0000, v16
; SI-NEXT: v_and_b32_e32 v14, 0xffff0000, v14
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v18, v20, v1, 16
-; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v7
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v43
; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
-; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v22, 16, v20
; SI-NEXT: v_alignbit_b32 v1, v22, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v52
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v43
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_alignbit_b32 v1, v23, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v12
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v10
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_alignbit_b32 v1, v26, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v53
+; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
+; SI-NEXT: v_alignbit_b32 v1, v27, v1, 16
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:392 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v32
-; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v29
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
-; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
-; SI-NEXT: v_lshrrev_b32_e32 v61, 16, v33
-; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:404 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v26, 0xffff0000, v24
+; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v21
+; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24
+; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24
+; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v15
+; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24
+; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v24
+; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
-; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
-; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
-; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
+; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v1, v27, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v1, v28, v1, 16
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:388 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v12
-; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
-; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v25
-; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v13
-; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
-; SI-NEXT: v_and_b32_e32 v27, 0xffff0000, v24
-; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v15
-; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24
-; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v24
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v7
+; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
+; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v33
+; SI-NEXT: v_and_b32_e32 v7, 0xffff0000, v32
+; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v62
+; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
+; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v32
+; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v20
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v1, v28, v1, 16
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
-; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_alignbit_b32 v52, v30, v1, 16
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v29
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:384 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v33
+; SI-NEXT: v_add_f32_e32 v33, 0x40c00000, v33
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v36, v35, v1, 16
+; SI-NEXT: v_alignbit_b32 v36, v34, v1, 16
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v8
-; SI-NEXT: buffer_load_dword v8, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v8
-; SI-NEXT: v_add_f32_e32 v32, 0x40c00000, v32
-; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v32
-; SI-NEXT: v_alignbit_b32 v48, v49, v1, 16
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v39, v48, v1, 16
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v8, 0xffff0000, v31
; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v10
; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
; SI-NEXT: v_lshrrev_b32_e32 v59, 16, v31
-; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v25
-; SI-NEXT: v_and_b32_e32 v32, 0xffff0000, v20
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v29
+; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v12
+; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
+; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v29
+; SI-NEXT: v_and_b32_e32 v12, 0xffff0000, v25
+; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v13
+; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
+; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v25
+; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v17
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v33
+; SI-NEXT: v_and_b32_e32 v33, 0xffff0000, v11
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v28, v59, v1, 16
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:380 ; 4-byte Folded Reload
-; SI-NEXT: v_alignbit_b32 v26, v28, v26, 16
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_alignbit_b32 v27, v59, v1, 16
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v17
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; SI-NEXT: v_alignbit_b32 v46, v61, v31, 16
-; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v46, v13, v31, 16
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
+; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v29
+; SI-NEXT: v_alignbit_b32 v61, v15, v25, 16
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v21, v30, v1, 16
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v28, v47, v1, 16
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v31
; SI-NEXT: v_add_f32_e32 v31, 0x40c00000, v31
-; SI-NEXT: v_lshrrev_b32_e32 v62, 16, v31
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v31
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v23, v10, v1, 16
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
-; SI-NEXT: v_alignbit_b32 v63, v23, v27, 16
-; SI-NEXT: v_alignbit_b32 v27, v21, v12, 16
+; SI-NEXT: v_alignbit_b32 v57, v30, v1, 16
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
-; SI-NEXT: v_alignbit_b32 v57, v58, v1, 16
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v22, v58, v1, 16
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v63, v22, v32, 16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:400 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff0000, v1
; SI-NEXT: v_add_f32_e32 v1, 0x40c00000, v1
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_alignbit_b32 v17, v1, v20, 16
-; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v15
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:376 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
-; SI-NEXT: v_alignbit_b32 v19, v17, v19, 16
+; SI-NEXT: v_alignbit_b32 v23, v1, v20, 16
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v24, 0xffff0000, v15
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v24, 0x40c00000, v24
-; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v24
-; SI-NEXT: v_alignbit_b32 v56, v47, v20, 16
-; SI-NEXT: v_alignbit_b32 v20, v62, v11, 16
+; SI-NEXT: v_and_b32_e32 v20, 0xffff0000, v20
+; SI-NEXT: v_add_f32_e32 v20, 0x40c00000, v20
+; SI-NEXT: v_alignbit_b32 v60, v56, v20, 16
+; SI-NEXT: v_alignbit_b32 v20, v17, v11, 16
; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_alignbit_b32 v16, v56, v16, 16
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_and_b32_e32 v25, 0xffff0000, v15
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v25, 0x40c00000, v25
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
+; SI-NEXT: v_alignbit_b32 v16, v60, v16, 16
+; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
+; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
-; SI-NEXT: v_lshrrev_b32_e32 v45, 16, v11
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
-; SI-NEXT: v_alignbit_b32 v22, v45, v9, 16
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v15
-; SI-NEXT: v_add_f32_e32 v29, 0x40c00000, v29
-; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v29
-; SI-NEXT: v_alignbit_b32 v13, v60, v25, 16
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v49, 0xffff0000, v17
+; SI-NEXT: v_add_f32_e32 v49, 0x40c00000, v49
+; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11
+; SI-NEXT: v_alignbit_b32 v45, v11, v9, 16
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:332 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_and_b32_e32 v50, 0xffff0000, v17
+; SI-NEXT: v_alignbit_b32 v17, v27, v10, 16
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v11, 0xffff0000, v11
-; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
-; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v9
-; SI-NEXT: v_alignbit_b32 v24, v44, v3, 16
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v50, 0x40c00000, v50
+; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v11
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
; SI-NEXT: v_lshrrev_b32_e32 v11, 16, v11
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_and_b32_e32 v39, 0xffff0000, v15
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v39, 0x40c00000, v39
+; SI-NEXT: v_alignbit_b32 v44, v9, v3, 16
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v3
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
-; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_alignbit_b32 v9, v11, v9, 16
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: v_alignbit_b32 v2, v3, v2, 16
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_and_b32_e32 v50, 0xffff0000, v15
-; SI-NEXT: v_mov_b32_e32 v15, v24
-; SI-NEXT: v_add_f32_e32 v50, 0x40c00000, v50
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v50
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v9, v11, v9, 16
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v3, v3, v39, 16
+; SI-NEXT: v_alignbit_b32 v3, v3, v49, 16
+; SI-NEXT: v_alignbit_b32 v2, v18, v2, 16
; SI-NEXT: v_alignbit_b32 v4, v3, v4, 16
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v4, v9, v5, 16
-; SI-NEXT: v_alignbit_b32 v5, v36, v7, 16
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v5, v61, v14, 16
+; SI-NEXT: v_mov_b32_e32 v14, v51
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v4, v2, v6, 16
-; SI-NEXT: v_alignbit_b32 v6, v46, v33, 16
+; SI-NEXT: v_mov_b32_e32 v6, v20
+; SI-NEXT: v_alignbit_b32 v20, v23, v19, 16
+; SI-NEXT: v_alignbit_b32 v19, v57, v26, 16
+; SI-NEXT: v_alignbit_b32 v26, v28, v12, 16
+; SI-NEXT: v_alignbit_b32 v12, v39, v8, 16
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v4, v44, v38, 16
+; SI-NEXT: v_mov_b32_e32 v38, v52
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_alignbit_b32 v4, v45, v37, 16
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v4, v24, v38, 16
-; SI-NEXT: v_alignbit_b32 v38, v48, v8, 16
+; SI-NEXT: v_alignbit_b32 v4, v6, v35, 16
+; SI-NEXT: v_alignbit_b32 v35, v36, v7, 16
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_alignbit_b32 v4, v22, v37, 16
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v22, v57, v32, 16
+; SI-NEXT: v_alignbit_b32 v4, v46, v33, 16
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_alignbit_b32 v4, v20, v34, 16
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v20, v52
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v4, v13, v14, 16
-; SI-NEXT: v_mov_b32_e32 v14, v51
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
; SI-NEXT: .LBB105_5: ; %end
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v4
; SI-NEXT: v_or_b32_e32 v7, v7, v8
; SI-NEXT: buffer_store_dword v7, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v3
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v3
; SI-NEXT: v_or_b32_e32 v7, v7, v8
; SI-NEXT: v_add_i32_e32 v8, vcc, 4, v0
; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1) expcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v4
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -228340,24 +229092,22 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0
; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v18
; SI-NEXT: v_or_b32_e32 v4, v4, v7
; SI-NEXT: v_add_i32_e32 v7, vcc, 16, v0
-; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 20, v0
+; SI-NEXT: buffer_store_dword v4, v7, s[0:3], 0 offen
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -228365,15 +229115,17 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 24, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v15
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v44
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v44
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 28, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -228381,17 +229133,17 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 32, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v45
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v45
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 36, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -228399,44 +229151,44 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 40, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v62
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v6
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 44, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v6
-; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 48, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v46
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v61
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v13
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 52, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v14
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v5
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 56, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v13
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v60
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v61
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v15
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 60, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -228444,101 +229196,103 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; SI-NEXT: v_add_i32_e32 v3, vcc, 64, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v56
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v47
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v60
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v56
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x44, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v18
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v19
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v20
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x48, v0
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v2, 0xffff, v17
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v23
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v22
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v63
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x50, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v22
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v58
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v63
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v19
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v23
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v10
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v57
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v27
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v21
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v30
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v47
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x64, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v26
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v17
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x68, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v28
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v27
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v59
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x6c, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v38
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x70, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v48
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v49
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v39
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v48
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x74, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v1, 0xffff, v20
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v38
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x78, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v36
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v35
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v34
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x7c, v0
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
@@ -228567,8 +229321,9 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; VI-NEXT: s_or_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v42, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: v_writelane_b32 v42, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v42, s31, 1
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
@@ -228587,14 +229342,17 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB105_3
+; VI-NEXT: s_cbranch_scc0 .LBB105_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB105_4
-; VI-NEXT: .LBB105_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB105_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB105_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: s_lshl_b32 s4, s30, 16
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -229173,8 +229931,6 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; VI-NEXT: v_alignbit_b32 v17, v17, v33, 16
; VI-NEXT: v_alignbit_b32 v16, v16, v18, 16
; VI-NEXT: s_branch .LBB105_5
-; VI-NEXT: .LBB105_3:
-; VI-NEXT: s_branch .LBB105_2
; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -229210,8 +229966,9 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: s_or_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v43, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: v_writelane_b32 v43, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v43, s31, 1
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
@@ -229230,15 +229987,18 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB105_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB105_4
-; GFX9-NEXT: .LBB105_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB105_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: s_and_b32 s4, s30, 0xffff0000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -229818,8 +230578,6 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX9-NEXT: v_and_or_b32 v17, v34, v18, v17
; GFX9-NEXT: v_and_or_b32 v16, v33, v18, v16
; GFX9-NEXT: s_branch .LBB105_5
-; GFX9-NEXT: .LBB105_3:
-; GFX9-NEXT: s_branch .LBB105_2
; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -229862,17 +230620,20 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX11-NEXT: s_mov_b32 s15, s3
; GFX11-NEXT: s_mov_b32 s14, s2
; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB105_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB105_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
-; GFX11-NEXT: .LBB105_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff0000, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v1, 16, v16
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff0000, v18
@@ -230473,8 +231234,6 @@ define inreg <64 x i16> @bitcast_v64bf16_to_v64i16_scalar(<64 x bfloat> inreg %a
; GFX11-NEXT: v_and_or_b32 v17, 0xffff0000, v32, v39
; GFX11-NEXT: v_and_or_b32 v16, 0xffff0000, v16, v48
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: s_branch .LBB105_2
; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -231637,16 +232396,16 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: v_writelane_b32 v40, s85, 29
; SI-NEXT: v_writelane_b32 v40, s86, 30
; SI-NEXT: v_writelane_b32 v40, s87, 31
+; SI-NEXT: v_writelane_b32 v40, s96, 32
+; SI-NEXT: s_mov_b32 s60, s17
; SI-NEXT: ; implicit-def: $vgpr41 : SGPR spill to VGPR lane
-; SI-NEXT: s_mov_b32 s60, s16
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_writelane_b32 v41, s17, 0
; SI-NEXT: s_mov_b32 s61, s19
-; SI-NEXT: v_writelane_b32 v41, s60, 1
-; SI-NEXT: s_mov_b32 s63, s18
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_writelane_b32 v41, s60, 0
+; SI-NEXT: v_writelane_b32 v41, s16, 1
; SI-NEXT: v_writelane_b32 v41, s61, 2
; SI-NEXT: s_mov_b32 s72, s21
-; SI-NEXT: v_writelane_b32 v41, s63, 3
+; SI-NEXT: v_writelane_b32 v41, s18, 3
; SI-NEXT: v_writelane_b32 v41, s72, 4
; SI-NEXT: s_mov_b32 s74, s23
; SI-NEXT: v_writelane_b32 v41, s20, 5
@@ -231660,25 +232419,25 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_mov_b32 s93, s29
; SI-NEXT: v_writelane_b32 v41, s26, 11
; SI-NEXT: v_writelane_b32 v41, s93, 12
-; SI-NEXT: v_readfirstlane_b32 s16, v2
+; SI-NEXT: v_readfirstlane_b32 s7, v2
; SI-NEXT: v_writelane_b32 v41, s28, 13
-; SI-NEXT: v_readfirstlane_b32 s73, v4
-; SI-NEXT: v_writelane_b32 v41, s16, 14
-; SI-NEXT: v_readfirstlane_b32 s89, v3
-; SI-NEXT: v_writelane_b32 v41, s73, 15
-; SI-NEXT: v_readfirstlane_b32 s90, v6
+; SI-NEXT: v_readfirstlane_b32 s63, v1
+; SI-NEXT: v_writelane_b32 v41, s7, 14
+; SI-NEXT: v_readfirstlane_b32 s89, v4
+; SI-NEXT: v_writelane_b32 v41, s63, 15
+; SI-NEXT: v_readfirstlane_b32 s90, v3
; SI-NEXT: v_writelane_b32 v41, s89, 16
-; SI-NEXT: v_readfirstlane_b32 s91, v5
+; SI-NEXT: v_readfirstlane_b32 s91, v6
; SI-NEXT: v_writelane_b32 v41, s90, 17
-; SI-NEXT: v_readfirstlane_b32 s34, v8
+; SI-NEXT: v_readfirstlane_b32 s92, v5
; SI-NEXT: v_writelane_b32 v41, s91, 18
+; SI-NEXT: v_readfirstlane_b32 s34, v8
+; SI-NEXT: v_writelane_b32 v41, s92, 19
; SI-NEXT: v_readfirstlane_b32 s35, v7
-; SI-NEXT: v_writelane_b32 v41, s34, 19
+; SI-NEXT: v_writelane_b32 v41, s34, 20
+; SI-NEXT: v_writelane_b32 v40, s97, 33
; SI-NEXT: v_readfirstlane_b32 s36, v10
-; SI-NEXT: v_writelane_b32 v41, s35, 20
-; SI-NEXT: v_writelane_b32 v40, s96, 32
-; SI-NEXT: v_readfirstlane_b32 s37, v9
-; SI-NEXT: v_writelane_b32 v41, s36, 21
+; SI-NEXT: v_writelane_b32 v41, s35, 21
; SI-NEXT: s_waitcnt vmcnt(7)
; SI-NEXT: v_readfirstlane_b32 s62, v31
; SI-NEXT: s_waitcnt vmcnt(6)
@@ -231700,16 +232459,13 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_readfirstlane_b32 s83, v36
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_readfirstlane_b32 s87, v38
+; SI-NEXT: v_readfirstlane_b32 s96, v38
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:80
-; SI-NEXT: v_readfirstlane_b32 s6, v37
+; SI-NEXT: v_readfirstlane_b32 s67, v37
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32
-; SI-NEXT: v_writelane_b32 v40, s97, 33
-; SI-NEXT: v_readfirstlane_b32 s38, v12
-; SI-NEXT: v_writelane_b32 v41, s37, 22
; SI-NEXT: v_writelane_b32 v40, s98, 34
; SI-NEXT: v_readfirstlane_b32 s14, v30
; SI-NEXT: v_readfirstlane_b32 s15, v29
@@ -231727,13 +232483,14 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: v_readfirstlane_b32 s25, v17
; SI-NEXT: v_readfirstlane_b32 s77, v16
; SI-NEXT: v_readfirstlane_b32 s23, v15
-; SI-NEXT: v_readfirstlane_b32 s39, v14
+; SI-NEXT: v_readfirstlane_b32 s73, v14
; SI-NEXT: v_readfirstlane_b32 s21, v13
+; SI-NEXT: v_readfirstlane_b32 s37, v12
; SI-NEXT: v_readfirstlane_b32 s19, v11
-; SI-NEXT: v_readfirstlane_b32 s18, v1
-; SI-NEXT: v_writelane_b32 v41, s38, 23
+; SI-NEXT: v_readfirstlane_b32 s17, v9
+; SI-NEXT: v_writelane_b32 v41, s36, 22
; SI-NEXT: v_writelane_b32 v40, s99, 35
-; SI-NEXT: v_writelane_b32 v41, s39, 24
+; SI-NEXT: v_writelane_b32 v41, s37, 23
; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_readfirstlane_b32 s58, v31
; SI-NEXT: s_waitcnt vmcnt(11)
@@ -231753,76 +232510,78 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_readfirstlane_b32 s42, v34
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v38
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_readfirstlane_b32 s43, v35
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_readfirstlane_b32 s40, v36
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s41, v37
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB107_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshl_b32 s4, s60, 16
+; SI-NEXT: s_lshl_b32 s4, s16, 16
; SI-NEXT: v_writelane_b32 v41, s4, 25
-; SI-NEXT: s_lshl_b32 s4, s63, 16
+; SI-NEXT: s_lshl_b32 s4, s60, 16
+; SI-NEXT: v_writelane_b32 v41, s4, 24
+; SI-NEXT: s_lshl_b32 s4, s18, 16
+; SI-NEXT: v_writelane_b32 v41, s4, 27
+; SI-NEXT: s_lshl_b32 s4, s61, 16
; SI-NEXT: v_writelane_b32 v41, s4, 26
; SI-NEXT: s_lshl_b32 s4, s20, 16
-; SI-NEXT: v_writelane_b32 v41, s4, 27
-; SI-NEXT: s_lshl_b32 s4, s22, 16
; SI-NEXT: v_writelane_b32 v41, s4, 28
-; SI-NEXT: s_lshl_b32 s4, s24, 16
+; SI-NEXT: s_lshl_b32 s4, s22, 16
; SI-NEXT: v_writelane_b32 v41, s4, 29
-; SI-NEXT: s_lshl_b32 s4, s26, 16
+; SI-NEXT: s_lshl_b32 s4, s24, 16
; SI-NEXT: v_writelane_b32 v41, s4, 30
-; SI-NEXT: s_lshl_b32 s4, s28, 16
+; SI-NEXT: s_lshl_b32 s4, s26, 16
; SI-NEXT: v_writelane_b32 v41, s4, 31
-; SI-NEXT: s_lshl_b32 s4, s18, 16
+; SI-NEXT: s_lshl_b32 s4, s28, 16
; SI-NEXT: v_writelane_b32 v41, s4, 32
-; SI-NEXT: s_lshl_b32 s4, s89, 16
+; SI-NEXT: s_lshl_b32 s4, s63, 16
; SI-NEXT: v_writelane_b32 v41, s4, 33
-; SI-NEXT: s_lshl_b32 s4, s91, 16
+; SI-NEXT: s_lshl_b32 s4, s90, 16
; SI-NEXT: v_writelane_b32 v41, s4, 34
-; SI-NEXT: s_lshl_b32 s4, s35, 16
+; SI-NEXT: s_lshl_b32 s4, s92, 16
; SI-NEXT: v_writelane_b32 v41, s4, 35
-; SI-NEXT: s_lshl_b32 s4, s37, 16
-; SI-NEXT: s_lshl_b32 s7, s17, 16
-; SI-NEXT: s_lshl_b32 s96, s61, 16
-; SI-NEXT: s_lshl_b32 s99, s72, 16
-; SI-NEXT: s_lshl_b32 s97, s74, 16
-; SI-NEXT: s_lshl_b32 s92, s75, 16
+; SI-NEXT: s_lshl_b32 s4, s35, 16
+; SI-NEXT: s_lshl_b32 s6, s72, 16
+; SI-NEXT: s_lshl_b32 s86, s74, 16
+; SI-NEXT: s_lshl_b32 s98, s75, 16
; SI-NEXT: s_lshl_b32 s94, s76, 16
; SI-NEXT: s_lshl_b32 s95, s93, 16
-; SI-NEXT: s_lshl_b32 s93, s16, 16
-; SI-NEXT: s_lshl_b32 s30, s73, 16
-; SI-NEXT: s_lshl_b32 s31, s90, 16
-; SI-NEXT: s_lshl_b32 s34, s34, 16
+; SI-NEXT: s_lshl_b32 s93, s7, 16
+; SI-NEXT: s_lshl_b32 s30, s89, 16
+; SI-NEXT: s_lshl_b32 s31, s91, 16
; SI-NEXT: v_writelane_b32 v41, s4, 36
+; SI-NEXT: s_lshl_b32 s34, s34, 16
+; SI-NEXT: s_lshl_b32 s87, s17, 16
; SI-NEXT: s_lshl_b32 s35, s36, 16
-; SI-NEXT: s_lshl_b32 s86, s19, 16
-; SI-NEXT: s_lshl_b32 s36, s38, 16
+; SI-NEXT: s_lshl_b32 s18, s19, 16
+; SI-NEXT: s_lshl_b32 s36, s37, 16
; SI-NEXT: s_lshl_b32 s22, s21, 16
-; SI-NEXT: s_lshl_b32 s37, s39, 16
+; SI-NEXT: s_lshl_b32 s37, s73, 16
; SI-NEXT: s_lshl_b32 s24, s23, 16
; SI-NEXT: s_lshl_b32 s38, s77, 16
-; SI-NEXT: s_lshl_b32 s28, s25, 16
+; SI-NEXT: s_lshl_b32 s61, s25, 16
; SI-NEXT: s_lshl_b32 s39, s78, 16
-; SI-NEXT: s_lshl_b32 s61, s27, 16
+; SI-NEXT: s_lshl_b32 s89, s27, 16
; SI-NEXT: s_lshl_b32 s48, s79, 16
-; SI-NEXT: s_lshl_b32 s89, s29, 16
+; SI-NEXT: s_lshl_b32 s90, s29, 16
; SI-NEXT: s_lshl_b32 s49, s88, 16
; SI-NEXT: s_lshl_b32 s60, s9, 16
; SI-NEXT: s_lshl_b32 s50, s8, 16
-; SI-NEXT: s_lshl_b32 s90, s11, 16
-; SI-NEXT: s_lshl_b32 s91, s10, 16
+; SI-NEXT: s_lshl_b32 s91, s11, 16
+; SI-NEXT: s_lshl_b32 s92, s10, 16
; SI-NEXT: s_lshl_b32 s70, s13, 16
; SI-NEXT: s_lshl_b32 s51, s12, 16
-; SI-NEXT: s_lshl_b32 s71, s15, 16
+; SI-NEXT: s_lshl_b32 s20, s15, 16
; SI-NEXT: s_lshl_b32 s52, s14, 16
-; SI-NEXT: s_lshl_b32 s20, s41, 16
+; SI-NEXT: s_lshl_b32 s28, s41, 16
; SI-NEXT: s_lshl_b32 s53, s40, 16
-; SI-NEXT: s_lshl_b32 s81, s43, 16
+; SI-NEXT: s_lshl_b32 s71, s43, 16
; SI-NEXT: s_lshl_b32 s54, s42, 16
-; SI-NEXT: s_lshl_b32 s63, s45, 16
+; SI-NEXT: s_lshl_b32 s81, s45, 16
; SI-NEXT: s_lshl_b32 s55, s44, 16
; SI-NEXT: s_lshl_b32 s72, s47, 16
; SI-NEXT: s_lshl_b32 s64, s46, 16
@@ -231830,75 +232589,77 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_lshl_b32 s65, s56, 16
; SI-NEXT: s_lshl_b32 s74, s59, 16
; SI-NEXT: s_lshl_b32 s66, s58, 16
-; SI-NEXT: s_lshl_b32 s75, s87, 16
-; SI-NEXT: s_mov_b32 s73, s6
-; SI-NEXT: s_lshl_b32 s67, s6, 16
+; SI-NEXT: s_lshl_b32 s75, s96, 16
+; SI-NEXT: s_mov_b32 s63, s67
+; SI-NEXT: s_lshl_b32 s67, s67, 16
; SI-NEXT: s_lshl_b32 s76, s83, 16
-; SI-NEXT: s_mov_b32 s16, s68
+; SI-NEXT: s_mov_b32 s7, s68
; SI-NEXT: s_lshl_b32 s68, s68, 16
; SI-NEXT: s_lshl_b32 s85, s84, 16
-; SI-NEXT: s_mov_b32 s98, s69
+; SI-NEXT: s_mov_b32 s97, s69
; SI-NEXT: s_lshl_b32 s69, s69, 16
-; SI-NEXT: s_lshl_b32 s17, s80, 16
-; SI-NEXT: s_mov_b32 s6, s62
+; SI-NEXT: s_lshl_b32 s16, s80, 16
+; SI-NEXT: s_mov_b32 s99, s62
; SI-NEXT: s_lshl_b32 s26, s62, 16
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_branch .LBB107_3
; SI-NEXT: .LBB107_2:
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: s_mov_b32 s16, s68
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: s_mov_b32 s73, s6
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: s_mov_b32 s6, s62
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: s_mov_b32 s98, s69
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: ; implicit-def: $sgpr96
-; SI-NEXT: ; implicit-def: $sgpr99
-; SI-NEXT: ; implicit-def: $sgpr97
-; SI-NEXT: ; implicit-def: $sgpr92
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr6
+; SI-NEXT: s_mov_b32 s7, s68
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: s_mov_b32 s63, s67
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: s_mov_b32 s99, s62
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: s_mov_b32 s97, s69
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; kill: killed $sgpr6
+; SI-NEXT: ; implicit-def: $sgpr6
+; SI-NEXT: ; kill: killed $sgpr6
+; SI-NEXT: ; implicit-def: $sgpr6
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr86
+; SI-NEXT: ; implicit-def: $sgpr98
; SI-NEXT: ; implicit-def: $sgpr94
; SI-NEXT: ; implicit-def: $sgpr95
; SI-NEXT: ; implicit-def: $sgpr93
; SI-NEXT: ; implicit-def: $sgpr30
; SI-NEXT: ; implicit-def: $sgpr31
; SI-NEXT: ; implicit-def: $sgpr34
+; SI-NEXT: ; implicit-def: $sgpr87
; SI-NEXT: ; implicit-def: $sgpr35
-; SI-NEXT: ; implicit-def: $sgpr86
+; SI-NEXT: ; implicit-def: $sgpr18
; SI-NEXT: ; implicit-def: $sgpr36
; SI-NEXT: ; implicit-def: $sgpr22
; SI-NEXT: ; implicit-def: $sgpr37
; SI-NEXT: ; implicit-def: $sgpr24
; SI-NEXT: ; implicit-def: $sgpr38
-; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: ; implicit-def: $sgpr39
; SI-NEXT: ; implicit-def: $sgpr61
-; SI-NEXT: ; implicit-def: $sgpr48
+; SI-NEXT: ; implicit-def: $sgpr39
; SI-NEXT: ; implicit-def: $sgpr89
+; SI-NEXT: ; implicit-def: $sgpr48
+; SI-NEXT: ; implicit-def: $sgpr90
; SI-NEXT: ; implicit-def: $sgpr49
; SI-NEXT: ; implicit-def: $sgpr60
; SI-NEXT: ; implicit-def: $sgpr50
-; SI-NEXT: ; implicit-def: $sgpr90
; SI-NEXT: ; implicit-def: $sgpr91
+; SI-NEXT: ; implicit-def: $sgpr92
; SI-NEXT: ; implicit-def: $sgpr70
; SI-NEXT: ; implicit-def: $sgpr51
-; SI-NEXT: ; implicit-def: $sgpr71
-; SI-NEXT: ; implicit-def: $sgpr52
; SI-NEXT: ; implicit-def: $sgpr20
+; SI-NEXT: ; implicit-def: $sgpr52
+; SI-NEXT: ; implicit-def: $sgpr28
; SI-NEXT: ; implicit-def: $sgpr53
-; SI-NEXT: ; implicit-def: $sgpr81
+; SI-NEXT: ; implicit-def: $sgpr71
; SI-NEXT: ; implicit-def: $sgpr54
-; SI-NEXT: ; implicit-def: $sgpr63
+; SI-NEXT: ; implicit-def: $sgpr81
; SI-NEXT: ; implicit-def: $sgpr55
; SI-NEXT: ; implicit-def: $sgpr72
; SI-NEXT: ; implicit-def: $sgpr64
@@ -231913,58 +232674,53 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: ; implicit-def: $sgpr85
; SI-NEXT: ; implicit-def: $sgpr69
; SI-NEXT: ; implicit-def: $sgpr26
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: ; implicit-def: $sgpr17
-; SI-NEXT: ; kill: killed $sgpr17
-; SI-NEXT: ; implicit-def: $sgpr17
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr16
+; SI-NEXT: ; kill: killed $sgpr16
+; SI-NEXT: ; implicit-def: $sgpr16
; SI-NEXT: .LBB107_3: ; %Flow
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
-; SI-NEXT: s_mov_b32 s5, s17
-; SI-NEXT: s_mov_b32 s17, s86
-; SI-NEXT: s_mov_b32 s86, s7
+; SI-NEXT: s_mov_b32 s5, s16
+; SI-NEXT: s_mov_b32 s16, s87
+; SI-NEXT: s_mov_b32 s87, s6
; SI-NEXT: s_cbranch_vccnz .LBB107_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_lshl_b32 s5, s6, 16
-; SI-NEXT: v_readlane_b32 s6, v41, 24
-; SI-NEXT: s_lshl_b32 s20, s6, 16
; SI-NEXT: v_readlane_b32 s6, v41, 23
-; SI-NEXT: s_lshl_b32 s17, s6, 16
+; SI-NEXT: s_lshl_b32 s18, s6, 16
; SI-NEXT: v_readlane_b32 s6, v41, 22
-; SI-NEXT: s_lshl_b32 s61, s16, 16
-; SI-NEXT: s_add_i32 s16, s6, 3
+; SI-NEXT: s_lshl_b32 s16, s6, 16
; SI-NEXT: v_readlane_b32 s6, v41, 21
-; SI-NEXT: s_and_b32 s16, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s7, s6, 16
+; SI-NEXT: s_lshl_b32 s61, s7, 16
+; SI-NEXT: s_add_i32 s7, s6, 3
; SI-NEXT: v_readlane_b32 s6, v41, 20
-; SI-NEXT: s_or_b32 s7, s7, s16
-; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: v_readlane_b32 s16, v41, 19
+; SI-NEXT: s_add_i32 s17, s17, 3
+; SI-NEXT: s_and_b32 s7, s7, 0xffff
+; SI-NEXT: s_lshl_b32 s6, s6, 16
+; SI-NEXT: s_and_b32 s17, s17, 0xffff
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: v_readlane_b32 s7, v41, 19
+; SI-NEXT: s_lshl_b32 s5, s99, 16
+; SI-NEXT: s_or_b32 s16, s16, s17
+; SI-NEXT: s_add_i32 s99, s7, 3
+; SI-NEXT: v_readlane_b32 s17, v41, 18
; SI-NEXT: s_add_i32 s19, s19, 3
-; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_lshl_b32 s16, s16, 16
+; SI-NEXT: s_and_b32 s7, s99, 0xffff
+; SI-NEXT: s_lshl_b32 s17, s17, 16
; SI-NEXT: s_and_b32 s19, s19, 0xffff
-; SI-NEXT: s_or_b32 s6, s16, s6
-; SI-NEXT: v_readlane_b32 s16, v41, 18
-; SI-NEXT: s_lshl_b32 s60, s98, 16
-; SI-NEXT: s_or_b32 s17, s17, s19
-; SI-NEXT: s_add_i32 s98, s16, 3
-; SI-NEXT: v_readlane_b32 s19, v41, 17
-; SI-NEXT: s_add_i32 s21, s21, 3
-; SI-NEXT: s_and_b32 s16, s98, 0xffff
-; SI-NEXT: s_lshl_b32 s19, s19, 16
+; SI-NEXT: s_or_b32 s7, s17, s7
+; SI-NEXT: v_readlane_b32 s17, v41, 17
+; SI-NEXT: s_lshl_b32 s60, s97, 16
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s9, s9, 3
-; SI-NEXT: s_and_b32 s21, s21, 0xffff
-; SI-NEXT: s_or_b32 s16, s19, s16
+; SI-NEXT: s_or_b32 s18, s18, s19
+; SI-NEXT: s_add_i32 s97, s17, 3
; SI-NEXT: v_readlane_b32 s19, v41, 16
; SI-NEXT: s_add_i32 s13, s13, 3
; SI-NEXT: s_and_b32 s11, s11, 0xffff
@@ -231972,9 +232728,9 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_and_b32 s9, s9, 0xffff
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_add_i32 s29, s29, 3
-; SI-NEXT: s_or_b32 s20, s20, s21
-; SI-NEXT: s_add_i32 s96, s19, 3
-; SI-NEXT: v_readlane_b32 s21, v41, 15
+; SI-NEXT: s_add_i32 s21, s21, 3
+; SI-NEXT: s_and_b32 s17, s97, 0xffff
+; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_add_i32 s15, s15, 3
; SI-NEXT: s_and_b32 s13, s13, 0xffff
; SI-NEXT: s_lshl_b32 s12, s12, 16
@@ -231983,8 +232739,10 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_and_b32 s9, s29, 0xffff
; SI-NEXT: s_lshl_b32 s11, s88, 16
; SI-NEXT: s_add_i32 s27, s27, 3
-; SI-NEXT: s_and_b32 s19, s96, 0xffff
-; SI-NEXT: s_lshl_b32 s21, s21, 16
+; SI-NEXT: s_and_b32 s21, s21, 0xffff
+; SI-NEXT: s_lshl_b32 s20, s73, 16
+; SI-NEXT: s_or_b32 s17, s19, s17
+; SI-NEXT: v_readlane_b32 s19, v41, 15
; SI-NEXT: s_and_b32 s15, s15, 0xffff
; SI-NEXT: s_lshl_b32 s14, s14, 16
; SI-NEXT: s_or_b32 s12, s12, s13
@@ -231992,20 +232750,20 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_and_b32 s11, s27, 0xffff
; SI-NEXT: s_lshl_b32 s13, s79, 16
; SI-NEXT: s_add_i32 s25, s25, 3
-; SI-NEXT: s_or_b32 s19, s21, s19
-; SI-NEXT: s_add_i32 s18, s18, 3
+; SI-NEXT: s_or_b32 s20, s20, s21
+; SI-NEXT: s_add_i32 s86, s19, 3
; SI-NEXT: v_readlane_b32 s21, v41, 14
; SI-NEXT: s_or_b32 s14, s14, s15
; SI-NEXT: s_or_b32 s11, s13, s11
; SI-NEXT: s_and_b32 s13, s25, 0xffff
; SI-NEXT: s_lshl_b32 s15, s78, 16
; SI-NEXT: s_add_i32 s23, s23, 3
-; SI-NEXT: s_and_b32 s18, s18, 0xffff
+; SI-NEXT: s_and_b32 s19, s86, 0xffff
; SI-NEXT: s_lshl_b32 s21, s21, 16
; SI-NEXT: s_or_b32 s13, s15, s13
; SI-NEXT: s_and_b32 s15, s23, 0xffff
; SI-NEXT: s_lshl_b32 s22, s77, 16
-; SI-NEXT: s_or_b32 s18, s21, s18
+; SI-NEXT: s_or_b32 s19, s21, s19
; SI-NEXT: v_readlane_b32 s21, v41, 13
; SI-NEXT: s_or_b32 s15, s22, s15
; SI-NEXT: s_add_i32 s21, s21, 3
@@ -232050,42 +232808,40 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_lshl_b32 s28, s28, 16
; SI-NEXT: s_or_b32 s27, s28, s27
; SI-NEXT: s_add_i32 s27, s27, 0x30000
+; SI-NEXT: s_and_b32 s28, s27, 0xffff0000
; SI-NEXT: s_add_i32 s26, s26, 0x30000
-; SI-NEXT: s_and_b32 s86, s27, 0xffff0000
+; SI-NEXT: v_writelane_b32 v41, s28, 24
; SI-NEXT: s_lshl_b32 s27, s27, 16
-; SI-NEXT: s_add_i32 s25, s25, 0x30000
; SI-NEXT: v_writelane_b32 v41, s27, 25
-; SI-NEXT: s_and_b32 s96, s26, 0xffff0000
+; SI-NEXT: s_and_b32 s27, s26, 0xffff0000
+; SI-NEXT: s_add_i32 s25, s25, 0x30000
+; SI-NEXT: v_writelane_b32 v41, s27, 26
; SI-NEXT: s_lshl_b32 s26, s26, 16
; SI-NEXT: s_add_i32 s24, s24, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s26, 26
-; SI-NEXT: s_and_b32 s99, s25, 0xffff0000
+; SI-NEXT: v_writelane_b32 v41, s26, 27
+; SI-NEXT: s_and_b32 s87, s25, 0xffff0000
; SI-NEXT: s_lshl_b32 s25, s25, 16
+; SI-NEXT: s_add_i32 s80, s80, 3
; SI-NEXT: s_add_i32 s23, s23, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s25, 27
-; SI-NEXT: s_and_b32 s97, s24, 0xffff0000
+; SI-NEXT: v_writelane_b32 v41, s25, 28
+; SI-NEXT: s_and_b32 s86, s24, 0xffff0000
; SI-NEXT: s_lshl_b32 s24, s24, 16
-; SI-NEXT: s_add_i32 s80, s80, 3
-; SI-NEXT: s_add_i32 s22, s22, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s24, 28
-; SI-NEXT: s_and_b32 s92, s23, 0xffff0000
-; SI-NEXT: s_lshl_b32 s23, s23, 16
; SI-NEXT: s_and_b32 s4, s80, 0xffff
; SI-NEXT: s_add_i32 s84, s84, 3
-; SI-NEXT: s_add_i32 s21, s21, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s23, 29
-; SI-NEXT: s_and_b32 s94, s22, 0xffff0000
-; SI-NEXT: s_lshl_b32 s22, s22, 16
+; SI-NEXT: s_add_i32 s22, s22, 0x30000
+; SI-NEXT: v_writelane_b32 v41, s24, 29
+; SI-NEXT: s_and_b32 s98, s23, 0xffff0000
+; SI-NEXT: s_lshl_b32 s23, s23, 16
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s5, s84, 0xffff
; SI-NEXT: s_add_i32 s83, s83, 3
-; SI-NEXT: s_add_i32 s18, s18, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s22, 30
-; SI-NEXT: s_and_b32 s95, s21, 0xffff0000
-; SI-NEXT: s_lshl_b32 s21, s21, 16
+; SI-NEXT: s_add_i32 s21, s21, 0x30000
+; SI-NEXT: v_writelane_b32 v41, s23, 30
+; SI-NEXT: s_and_b32 s94, s22, 0xffff0000
+; SI-NEXT: s_lshl_b32 s22, s22, 16
; SI-NEXT: s_or_b32 s5, s60, s5
; SI-NEXT: s_and_b32 s60, s83, 0xffff
-; SI-NEXT: s_add_i32 s87, s87, 3
+; SI-NEXT: s_add_i32 s96, s96, 3
; SI-NEXT: s_add_i32 s59, s59, 3
; SI-NEXT: s_add_i32 s57, s57, 3
; SI-NEXT: s_add_i32 s47, s47, 3
@@ -232093,12 +232849,12 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_add_i32 s43, s43, 3
; SI-NEXT: s_add_i32 s41, s41, 3
; SI-NEXT: s_add_i32 s19, s19, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s21, 31
-; SI-NEXT: s_and_b32 s93, s18, 0xffff0000
-; SI-NEXT: s_lshl_b32 s18, s18, 16
+; SI-NEXT: v_writelane_b32 v41, s22, 31
+; SI-NEXT: s_and_b32 s95, s21, 0xffff0000
+; SI-NEXT: s_lshl_b32 s21, s21, 16
; SI-NEXT: s_or_b32 s76, s61, s60
-; SI-NEXT: s_and_b32 s60, s87, 0xffff
-; SI-NEXT: s_lshl_b32 s61, s73, 16
+; SI-NEXT: s_and_b32 s60, s96, 0xffff
+; SI-NEXT: s_lshl_b32 s61, s63, 16
; SI-NEXT: s_and_b32 s59, s59, 0xffff
; SI-NEXT: s_lshl_b32 s58, s58, 16
; SI-NEXT: s_and_b32 s57, s57, 0xffff
@@ -232111,9 +232867,10 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_lshl_b32 s42, s42, 16
; SI-NEXT: s_and_b32 s41, s41, 0xffff
; SI-NEXT: s_lshl_b32 s40, s40, 16
-; SI-NEXT: s_add_i32 s16, s16, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s18, 32
-; SI-NEXT: s_lshl_b32 s18, s19, 16
+; SI-NEXT: s_add_i32 s17, s17, 0x30000
+; SI-NEXT: v_writelane_b32 v41, s21, 32
+; SI-NEXT: s_and_b32 s93, s19, 0xffff0000
+; SI-NEXT: s_lshl_b32 s19, s19, 16
; SI-NEXT: s_or_b32 s75, s61, s60
; SI-NEXT: s_or_b32 s58, s58, s59
; SI-NEXT: s_or_b32 s56, s56, s57
@@ -232121,10 +232878,10 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_or_b32 s44, s44, s45
; SI-NEXT: s_or_b32 s42, s42, s43
; SI-NEXT: s_or_b32 s40, s40, s41
-; SI-NEXT: s_add_i32 s6, s6, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s18, 33
-; SI-NEXT: s_and_b32 s31, s16, 0xffff0000
-; SI-NEXT: s_lshl_b32 s16, s16, 16
+; SI-NEXT: s_add_i32 s7, s7, 0x30000
+; SI-NEXT: v_writelane_b32 v41, s19, 33
+; SI-NEXT: s_and_b32 s30, s17, 0xffff0000
+; SI-NEXT: s_lshl_b32 s17, s17, 16
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s76, s76, 0x30000
@@ -232144,41 +232901,43 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_add_i32 s13, s13, 0x30000
; SI-NEXT: s_add_i32 s15, s15, 0x30000
; SI-NEXT: s_add_i32 s20, s20, 0x30000
-; SI-NEXT: s_add_i32 s17, s17, 0x30000
-; SI-NEXT: s_add_i32 s7, s7, 0x30000
-; SI-NEXT: v_writelane_b32 v41, s16, 34
+; SI-NEXT: s_add_i32 s18, s18, 0x30000
+; SI-NEXT: s_add_i32 s16, s16, 0x30000
+; SI-NEXT: s_add_i32 s6, s6, 0x30000
+; SI-NEXT: v_writelane_b32 v41, s17, 34
+; SI-NEXT: s_and_b32 s31, s7, 0xffff0000
+; SI-NEXT: s_lshl_b32 s7, s7, 16
+; SI-NEXT: v_writelane_b32 v41, s7, 35
; SI-NEXT: s_and_b32 s34, s6, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s6, 16
-; SI-NEXT: s_and_b32 s30, s19, 0xffff0000
-; SI-NEXT: v_writelane_b32 v41, s6, 35
-; SI-NEXT: s_and_b32 s35, s7, 0xffff0000
-; SI-NEXT: s_lshl_b32 s6, s7, 16
-; SI-NEXT: s_and_b32 s36, s17, 0xffff0000
-; SI-NEXT: s_lshl_b32 s17, s17, 16
+; SI-NEXT: s_and_b32 s35, s16, 0xffff0000
+; SI-NEXT: s_lshl_b32 s16, s16, 16
+; SI-NEXT: s_and_b32 s36, s18, 0xffff0000
+; SI-NEXT: s_lshl_b32 s18, s18, 16
; SI-NEXT: s_and_b32 s37, s20, 0xffff0000
; SI-NEXT: s_lshl_b32 s22, s20, 16
; SI-NEXT: s_and_b32 s38, s15, 0xffff0000
; SI-NEXT: s_lshl_b32 s24, s15, 16
; SI-NEXT: s_and_b32 s39, s13, 0xffff0000
-; SI-NEXT: s_lshl_b32 s28, s13, 16
+; SI-NEXT: s_lshl_b32 s61, s13, 16
; SI-NEXT: s_and_b32 s48, s11, 0xffff0000
-; SI-NEXT: s_lshl_b32 s61, s11, 16
+; SI-NEXT: s_lshl_b32 s89, s11, 16
; SI-NEXT: s_and_b32 s49, s9, 0xffff0000
-; SI-NEXT: s_lshl_b32 s89, s9, 16
+; SI-NEXT: s_lshl_b32 s90, s9, 16
; SI-NEXT: s_and_b32 s50, s8, 0xffff0000
; SI-NEXT: s_lshl_b32 s60, s8, 16
-; SI-NEXT: s_and_b32 s91, s10, 0xffff0000
-; SI-NEXT: s_lshl_b32 s90, s10, 16
+; SI-NEXT: s_and_b32 s92, s10, 0xffff0000
+; SI-NEXT: s_lshl_b32 s91, s10, 16
; SI-NEXT: s_and_b32 s51, s12, 0xffff0000
; SI-NEXT: s_lshl_b32 s70, s12, 16
; SI-NEXT: s_and_b32 s52, s14, 0xffff0000
-; SI-NEXT: s_lshl_b32 s71, s14, 16
+; SI-NEXT: s_lshl_b32 s20, s14, 16
; SI-NEXT: s_and_b32 s53, s40, 0xffff0000
-; SI-NEXT: s_lshl_b32 s20, s40, 16
+; SI-NEXT: s_lshl_b32 s28, s40, 16
; SI-NEXT: s_and_b32 s54, s42, 0xffff0000
-; SI-NEXT: s_lshl_b32 s81, s42, 16
+; SI-NEXT: s_lshl_b32 s71, s42, 16
; SI-NEXT: s_and_b32 s55, s44, 0xffff0000
-; SI-NEXT: s_lshl_b32 s63, s44, 16
+; SI-NEXT: s_lshl_b32 s81, s44, 16
; SI-NEXT: s_and_b32 s64, s46, 0xffff0000
; SI-NEXT: s_lshl_b32 s72, s46, 16
; SI-NEXT: s_and_b32 s65, s56, 0xffff0000
@@ -232195,39 +232954,41 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_lshl_b32 s5, s4, 16
; SI-NEXT: v_writelane_b32 v41, s6, 36
; SI-NEXT: .LBB107_5: ; %end
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s86
+; SI-NEXT: v_readlane_b32 s4, v41, 24
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
; SI-NEXT: v_readlane_b32 s4, v41, 25
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
+; SI-NEXT: v_readlane_b32 s4, v41, 26
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s96
-; SI-NEXT: v_readlane_b32 s4, v41, 26
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s4
+; SI-NEXT: v_readlane_b32 s4, v41, 27
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 4, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s99
-; SI-NEXT: v_readlane_b32 s4, v41, 27
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s87
+; SI-NEXT: v_readlane_b32 s4, v41, 28
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 8, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s97
-; SI-NEXT: v_readlane_b32 s4, v41, 28
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s86
+; SI-NEXT: v_readlane_b32 s4, v41, 29
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 12, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s92
-; SI-NEXT: v_readlane_b32 s4, v41, 29
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s98
+; SI-NEXT: v_readlane_b32 s4, v41, 30
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -232235,7 +232996,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s94
-; SI-NEXT: v_readlane_b32 s4, v41, 30
+; SI-NEXT: v_readlane_b32 s4, v41, 31
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -232243,7 +233004,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s95
-; SI-NEXT: v_readlane_b32 s4, v41, 31
+; SI-NEXT: v_readlane_b32 s4, v41, 32
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -232251,7 +233012,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s93
-; SI-NEXT: v_readlane_b32 s4, v41, 32
+; SI-NEXT: v_readlane_b32 s4, v41, 33
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -232259,7 +233020,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s30
-; SI-NEXT: v_readlane_b32 s4, v41, 33
+; SI-NEXT: v_readlane_b32 s4, v41, 34
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -232267,7 +233028,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s31
-; SI-NEXT: v_readlane_b32 s4, v41, 34
+; SI-NEXT: v_readlane_b32 s4, v41, 35
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -232275,7 +233036,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s34
-; SI-NEXT: v_readlane_b32 s4, v41, 35
+; SI-NEXT: v_readlane_b32 s4, v41, 36
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
@@ -232283,16 +233044,15 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s35
-; SI-NEXT: v_readlane_b32 s4, v41, 36
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s4
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 44, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s36
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s17
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 48, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
@@ -232313,21 +233073,21 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s39
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s28
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s61
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 60, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s48
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s61
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s89
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s49
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s89
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s90
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x44, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
@@ -232339,9 +233099,9 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x48, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mul_f32_e64 v1, 1.0, s91
+; SI-NEXT: v_mul_f32_e64 v1, 1.0, s92
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s90
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s91
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x4c, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
@@ -232355,28 +233115,28 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s52
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s71
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s20
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x54, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s53
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s20
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s28
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x58, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s54
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s81
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s71
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x5c, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s55
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_mul_f32_e64 v2, 1.0, s63
+; SI-NEXT: v_mul_f32_e64 v2, 1.0, s81
; SI-NEXT: v_alignbit_b32 v1, v1, v2, 16
; SI-NEXT: v_add_i32_e32 v2, vcc, 0x60, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
@@ -232483,8 +233243,9 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: v_writelane_b32 v32, s34, 2
; VI-NEXT: v_writelane_b32 v32, s35, 3
; VI-NEXT: v_writelane_b32 v32, s36, 4
-; VI-NEXT: v_writelane_b32 v32, s37, 5
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: v_writelane_b32 v32, s37, 5
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v32, s38, 6
; VI-NEXT: v_readfirstlane_b32 s47, v2
; VI-NEXT: v_readfirstlane_b32 s46, v3
@@ -232502,14 +233263,17 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: v_readfirstlane_b32 s10, v15
; VI-NEXT: v_readfirstlane_b32 s9, v16
; VI-NEXT: v_readfirstlane_b32 s8, v17
-; VI-NEXT: v_readfirstlane_b32 s6, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v1
+; VI-NEXT: v_readfirstlane_b32 s7, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v1
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v32, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB107_4
+; VI-NEXT: s_cbranch_scc0 .LBB107_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB107_3
-; VI-NEXT: .LBB107_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB107_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB107_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_and_b32 s4, s47, 0xffff0000
; VI-NEXT: s_add_i32 s5, s47, 3
; VI-NEXT: s_and_b32 s47, s46, 0xffff0000
@@ -232570,12 +233334,12 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s37, s29, 0xffff0000
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: s_and_b32 s38, s6, 0xffff0000
-; VI-NEXT: s_add_i32 s6, s6, 3
-; VI-NEXT: s_and_b32 s39, s7, 0xffff0000
+; VI-NEXT: s_and_b32 s38, s7, 0xffff0000
; VI-NEXT: s_add_i32 s7, s7, 3
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
+; VI-NEXT: s_and_b32 s39, s6, 0xffff0000
+; VI-NEXT: s_add_i32 s6, s6, 3
; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: s_and_b32 s7, s7, 0xffff
; VI-NEXT: s_and_b32 s29, s29, 0xffff
; VI-NEXT: s_and_b32 s28, s28, 0xffff
; VI-NEXT: s_and_b32 s27, s27, 0xffff
@@ -232606,8 +233370,8 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: s_and_b32 s45, s45, 0xffff
; VI-NEXT: s_and_b32 s46, s46, 0xffff
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_or_b32 s7, s39, s7
-; VI-NEXT: s_or_b32 s6, s38, s6
+; VI-NEXT: s_or_b32 s6, s39, s6
+; VI-NEXT: s_or_b32 s7, s38, s7
; VI-NEXT: s_or_b32 s29, s37, s29
; VI-NEXT: s_or_b32 s28, s36, s28
; VI-NEXT: s_or_b32 s27, s35, s27
@@ -232638,8 +233402,8 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: s_or_b32 s45, s56, s45
; VI-NEXT: s_or_b32 s46, s47, s46
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
+; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s29, s29, 0x30000
; VI-NEXT: s_add_i32 s28, s28, 0x30000
; VI-NEXT: s_add_i32 s27, s27, 0x30000
@@ -232670,7 +233434,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: s_add_i32 s45, s45, 0x30000
; VI-NEXT: s_add_i32 s46, s46, 0x30000
; VI-NEXT: s_add_i32 s47, s4, 0x30000
-; VI-NEXT: .LBB107_3: ; %end
+; VI-NEXT: .LBB107_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -232685,8 +233449,8 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: v_mov_b32_e32 v14, s6
-; VI-NEXT: v_mov_b32_e32 v15, s7
+; VI-NEXT: v_mov_b32_e32 v14, s7
+; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: v_mov_b32_e32 v16, s47
; VI-NEXT: v_mov_b32_e32 v17, s46
; VI-NEXT: v_mov_b32_e32 v18, s45
@@ -232716,13 +233480,12 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB107_4:
-; VI-NEXT: s_branch .LBB107_2
;
; GFX9-LABEL: bitcast_v64i16_to_v64bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -232743,7 +233506,7 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -232756,10 +233519,13 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB107_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB107_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB107_3
-; GFX9-NEXT: .LBB107_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB107_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB107_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -232792,11 +233558,9 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX9-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB107_3: ; %end
+; GFX9-NEXT: .LBB107_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB107_4:
-; GFX9-NEXT: s_branch .LBB107_2
;
; GFX11-LABEL: bitcast_v64i16_to_v64bf16_scalar:
; GFX11: ; %bb.0:
@@ -232810,17 +233574,20 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX11-NEXT: s_mov_b32 s15, s3
; GFX11-NEXT: s_mov_b32 s14, s2
; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB107_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB107_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB107_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB107_4
-; GFX11-NEXT: .LBB107_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -232854,8 +233621,6 @@ define inreg <64 x bfloat> @bitcast_v64i16_to_v64bf16_scalar(<64 x i16> inreg %a
; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB107_3:
-; GFX11-NEXT: s_branch .LBB107_2
; GFX11-NEXT: .LBB107_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -233959,10 +234724,14 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v45, s28
; SI-NEXT: v_cvt_f16_f32_e32 v61, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB109_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB109_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB109_3
-; SI-NEXT: .LBB109_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB109_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB109_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
@@ -234269,7 +235038,7 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v24, v5, v24, 16
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_alignbit_b32 v37, v1, v37, 16
-; SI-NEXT: .LBB109_3: ; %end
+; SI-NEXT: .LBB109_4: ; %end
; SI-NEXT: v_and_b32_e32 v48, 0xffff, v60
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v4, v48, v4
@@ -234491,13 +235260,12 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB109_4:
-; SI-NEXT: s_branch .LBB109_2
;
; VI-LABEL: bitcast_v64f16_to_v64i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v17
; VI-NEXT: v_mov_b32_e32 v30, v16
; VI-NEXT: v_mov_b32_e32 v29, v15
@@ -234518,7 +235286,7 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
@@ -234531,10 +235299,13 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB109_4
+; VI-NEXT: s_cbranch_scc0 .LBB109_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB109_3
-; VI-NEXT: .LBB109_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB109_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB109_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v18, 0x200
; VI-NEXT: v_add_f16_e32 v33, 0x200, v15
; VI-NEXT: v_add_f16_sdwa v15, v15, v18 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
@@ -234632,16 +235403,15 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v16, 0x200, v16
; VI-NEXT: v_or_b32_e32 v17, v33, v17
; VI-NEXT: v_or_b32_e32 v16, v16, v18
-; VI-NEXT: .LBB109_3: ; %end
+; VI-NEXT: .LBB109_4: ; %end
; VI-NEXT: v_mov_b32_e32 v18, v32
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB109_4:
-; VI-NEXT: s_branch .LBB109_2
;
; GFX9-LABEL: bitcast_v64f16_to_v64i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -234662,7 +235432,7 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -234675,10 +235445,13 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB109_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB109_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB109_3
-; GFX9-NEXT: .LBB109_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB109_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -234712,11 +235485,9 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v32, v32, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v17, v17, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v16, v16, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB109_3: ; %end
+; GFX9-NEXT: .LBB109_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB109_4:
-; GFX9-NEXT: s_branch .LBB109_2
;
; GFX11-LABEL: bitcast_v64f16_to_v64i16_scalar:
; GFX11: ; %bb.0:
@@ -234730,17 +235501,20 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX11-NEXT: s_mov_b32 s15, s3
; GFX11-NEXT: s_mov_b32 s14, s2
; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB109_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB109_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
-; GFX11-NEXT: .LBB109_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -234774,8 +235548,6 @@ define inreg <64 x i16> @bitcast_v64f16_to_v64i16_scalar(<64 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, v17 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, v16 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB109_3:
-; GFX11-NEXT: s_branch .LBB109_2
; GFX11-NEXT: .LBB109_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -236173,11 +236945,11 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v42, v4
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:300 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:304 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:308 ; 4-byte Folded Spill
@@ -236188,7 +236960,7 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:332 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:336 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:340 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:344 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:348 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:352 ; 4-byte Folded Spill
@@ -236200,6 +236972,7 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v41
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB111_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v4, v1
@@ -236226,14 +236999,14 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v41, s19
-; SI-NEXT: v_mov_b32_e32 v2, v9
+; SI-NEXT: v_mov_b32_e32 v2, v7
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v5
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v41, s20
-; SI-NEXT: v_mov_b32_e32 v3, v10
+; SI-NEXT: v_mov_b32_e32 v3, v8
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v6
@@ -236268,14 +237041,14 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v41, s25
-; SI-NEXT: v_mov_b32_e32 v60, v29
+; SI-NEXT: v_mov_b32_e32 v61, v29
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v11
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v41, s28
-; SI-NEXT: v_mov_b32_e32 v61, v30
+; SI-NEXT: v_mov_b32_e32 v60, v30
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v12
@@ -236341,27 +237114,22 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr41
; SI-NEXT: ; kill: killed $vgpr41
-; SI-NEXT: v_mov_b32_e32 v61, v30
+; SI-NEXT: v_mov_b32_e32 v60, v30
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr41
; SI-NEXT: ; kill: killed $vgpr41
-; SI-NEXT: v_mov_b32_e32 v60, v29
+; SI-NEXT: v_mov_b32_e32 v61, v29
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr41
; SI-NEXT: ; kill: killed $vgpr41
-; SI-NEXT: v_mov_b32_e32 v3, v10
+; SI-NEXT: v_mov_b32_e32 v3, v8
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr41
; SI-NEXT: ; kill: killed $vgpr41
-; SI-NEXT: v_mov_b32_e32 v2, v9
-; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: ; kill: killed $vgpr4
-; SI-NEXT: ; implicit-def: $vgpr41
-; SI-NEXT: ; kill: killed $vgpr41
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: v_mov_b32_e32 v2, v7
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr41
@@ -236420,6 +237188,10 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: ; kill: killed $vgpr41
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
+; SI-NEXT: ; implicit-def: $vgpr41
+; SI-NEXT: ; kill: killed $vgpr41
+; SI-NEXT: ; implicit-def: $vgpr4
+; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr4
@@ -236458,8 +237230,8 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: ; %bb.4: ; %cmp.true
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:336 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:372 ; 4-byte Folded Reload
-; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v3
-; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v2
+; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v3
+; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v2
; SI-NEXT: s_add_i32 s16, s16, 3
; SI-NEXT: v_cvt_f32_f16_e32 v41, s16
; SI-NEXT: s_add_i32 s17, s17, 3
@@ -236518,8 +237290,8 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: v_add_i32_e32 v59, vcc, 3, v63
; SI-NEXT: v_add_i32_e32 v31, vcc, 3, v31
; SI-NEXT: v_add_i32_e32 v32, vcc, 3, v32
-; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v61
-; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v60
+; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v60
+; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v61
; SI-NEXT: s_add_i32 s29, s29, 3
; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v4
@@ -236541,73 +237313,75 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v4
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:328 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:368 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:324 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:364 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:320 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:360 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:316 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:356 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:312 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:352 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:308 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:348 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:304 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:344 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:300 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:340 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v11, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v8, vcc, 3, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v7, vcc, 3, v4
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v10, vcc, 3, v4
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v6, vcc, 3, v4
+; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v4
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v1
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v3, vcc, 3, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v4
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v1
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
@@ -236617,8 +237391,6 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v3
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_add_i32_e32 v4, vcc, 3, v4
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v1, v4
; SI-NEXT: v_cvt_f32_f16_e32 v4, v49
@@ -237012,8 +237784,9 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: v_writelane_b32 v32, s34, 2
; VI-NEXT: v_writelane_b32 v32, s35, 3
; VI-NEXT: v_writelane_b32 v32, s36, 4
-; VI-NEXT: v_writelane_b32 v32, s37, 5
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: v_writelane_b32 v32, s37, 5
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v32, s38, 6
; VI-NEXT: v_readfirstlane_b32 s47, v2
; VI-NEXT: v_readfirstlane_b32 s46, v3
@@ -237031,14 +237804,17 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v15
; VI-NEXT: v_readfirstlane_b32 s9, v16
; VI-NEXT: v_readfirstlane_b32 s8, v17
-; VI-NEXT: v_readfirstlane_b32 s6, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v1
+; VI-NEXT: v_readfirstlane_b32 s7, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v1
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v32, s39, 7
-; VI-NEXT: s_cbranch_scc0 .LBB111_4
+; VI-NEXT: s_cbranch_scc0 .LBB111_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB111_3
-; VI-NEXT: .LBB111_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB111_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB111_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_and_b32 s4, s47, 0xffff0000
; VI-NEXT: s_add_i32 s5, s47, 3
; VI-NEXT: s_and_b32 s47, s46, 0xffff0000
@@ -237099,12 +237875,12 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s37, s29, 0xffff0000
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: s_and_b32 s38, s6, 0xffff0000
-; VI-NEXT: s_add_i32 s6, s6, 3
-; VI-NEXT: s_and_b32 s39, s7, 0xffff0000
+; VI-NEXT: s_and_b32 s38, s7, 0xffff0000
; VI-NEXT: s_add_i32 s7, s7, 3
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
+; VI-NEXT: s_and_b32 s39, s6, 0xffff0000
+; VI-NEXT: s_add_i32 s6, s6, 3
; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: s_and_b32 s7, s7, 0xffff
; VI-NEXT: s_and_b32 s29, s29, 0xffff
; VI-NEXT: s_and_b32 s28, s28, 0xffff
; VI-NEXT: s_and_b32 s27, s27, 0xffff
@@ -237135,8 +237911,8 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s45, s45, 0xffff
; VI-NEXT: s_and_b32 s46, s46, 0xffff
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_or_b32 s7, s39, s7
-; VI-NEXT: s_or_b32 s6, s38, s6
+; VI-NEXT: s_or_b32 s6, s39, s6
+; VI-NEXT: s_or_b32 s7, s38, s7
; VI-NEXT: s_or_b32 s29, s37, s29
; VI-NEXT: s_or_b32 s28, s36, s28
; VI-NEXT: s_or_b32 s27, s35, s27
@@ -237167,8 +237943,8 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: s_or_b32 s45, s56, s45
; VI-NEXT: s_or_b32 s46, s47, s46
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
+; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s29, s29, 0x30000
; VI-NEXT: s_add_i32 s28, s28, 0x30000
; VI-NEXT: s_add_i32 s27, s27, 0x30000
@@ -237199,7 +237975,7 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s45, s45, 0x30000
; VI-NEXT: s_add_i32 s46, s46, 0x30000
; VI-NEXT: s_add_i32 s47, s4, 0x30000
-; VI-NEXT: .LBB111_3: ; %end
+; VI-NEXT: .LBB111_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -237214,8 +237990,8 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: v_mov_b32_e32 v14, s6
-; VI-NEXT: v_mov_b32_e32 v15, s7
+; VI-NEXT: v_mov_b32_e32 v14, s7
+; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: v_mov_b32_e32 v16, s47
; VI-NEXT: v_mov_b32_e32 v17, s46
; VI-NEXT: v_mov_b32_e32 v18, s45
@@ -237245,13 +238021,12 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB111_4:
-; VI-NEXT: s_branch .LBB111_2
;
; GFX9-LABEL: bitcast_v64i16_to_v64f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v17
; GFX9-NEXT: v_mov_b32_e32 v30, v16
; GFX9-NEXT: v_mov_b32_e32 v29, v15
@@ -237272,7 +238047,7 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
@@ -237285,10 +238060,13 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB111_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB111_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB111_3
-; GFX9-NEXT: .LBB111_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB111_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB111_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -237321,11 +238099,9 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v32, v32, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB111_3: ; %end
+; GFX9-NEXT: .LBB111_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v18, v32
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB111_4:
-; GFX9-NEXT: s_branch .LBB111_2
;
; GFX11-LABEL: bitcast_v64i16_to_v64f16_scalar:
; GFX11: ; %bb.0:
@@ -237339,17 +238115,20 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
; GFX11-NEXT: s_mov_b32 s15, s3
; GFX11-NEXT: s_mov_b32 s14, s2
; GFX11-NEXT: s_mov_b32 s13, s1
-; GFX11-NEXT: s_mov_b32 s12, s0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB111_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB111_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB111_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB111_4
-; GFX11-NEXT: .LBB111_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -237383,8 +238162,6 @@ define inreg <64 x half> @bitcast_v64i16_to_v64f16_scalar(<64 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB111_3:
-; GFX11-NEXT: s_branch .LBB111_2
; GFX11-NEXT: .LBB111_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
index 3e96ab1..e837971 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.128bit.ll
@@ -93,86 +93,93 @@ define inreg <4 x float> @bitcast_v4i32_to_v4f32_scalar(<4 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s19, s19, 3
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v4i32_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v4i32_to_v4f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v4i32_to_v4f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -275,17 +282,19 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -297,17 +306,19 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -319,17 +330,19 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -341,19 +354,20 @@ define inreg <4 x i32> @bitcast_v4f32_to_v4i32_scalar(<4 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -462,86 +476,93 @@ define inreg <2 x i64> @bitcast_v4i32_to_v2i64_scalar(<4 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s19, s19, 3
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v4i32_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v4i32_to_v2i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v4i32_to_v2i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -647,86 +668,93 @@ define inreg <4 x i32> @bitcast_v2i64_to_v4i32_scalar(<2 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s18, s18, 3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v2i64_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v2i64_to_v4i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v2i64_to_v4i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB7_3: ; %end
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -831,86 +859,93 @@ define inreg <2 x double> @bitcast_v4i32_to_v2f64_scalar(<4 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s19, s19, 3
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v4i32_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v4i32_to_v2f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v4i32_to_v2f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1010,15 +1045,17 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -1030,15 +1067,17 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1050,15 +1089,17 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1070,17 +1111,18 @@ define inreg <4 x i32> @bitcast_v2f64_to_v4i32_scalar(<2 x double> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1207,6 +1249,7 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -1240,71 +1283,78 @@ define inreg <8 x i16> @bitcast_v4i32_to_v8i16_scalar(<4 x i32> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v4i32_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v4i32_to_v8i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v4i32_to_v8i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1462,6 +1512,7 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -1506,16 +1557,22 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v8i16_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s19, 3
; VI-NEXT: s_and_b32 s4, s19, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -1536,30 +1593,30 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v8i16_to_v4i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1571,19 +1628,20 @@ define inreg <4 x i32> @bitcast_v8i16_to_v4i32_scalar(<8 x i16> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1740,6 +1798,7 @@ define inreg <8 x half> @bitcast_v4i32_to_v8f16_scalar(<4 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s19, 16
@@ -1783,71 +1842,78 @@ define inreg <8 x half> @bitcast_v4i32_to_v8f16_scalar(<4 x i32> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v4i32_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v4i32_to_v8f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v4i32_to_v8f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2032,6 +2098,7 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v5, s23
; SI-NEXT: v_cvt_f16_f32_e32 v4, s22
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v11
@@ -2080,16 +2147,22 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v8f16_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2112,8 +2185,6 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2125,18 +2196,20 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2148,19 +2221,20 @@ define inreg <4 x i32> @bitcast_v8f16_to_v4i32_scalar(<8 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2309,6 +2383,7 @@ define inreg <8 x bfloat> @bitcast_v4i32_to_v8bf16_scalar(<4 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s19, 0xffff0000
@@ -2352,71 +2427,78 @@ define inreg <8 x bfloat> @bitcast_v4i32_to_v8bf16_scalar(<4 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v4i32_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v4i32_to_v8bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v4i32_to_v8bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB21_3: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2856,6 +2938,7 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s23
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s22
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB23_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v10
@@ -2896,16 +2979,22 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB23_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB23_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB23_2
+; SI-NEXT: s_branch .LBB23_3
;
; VI-LABEL: bitcast_v8bf16_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -2980,8 +3069,6 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v4, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2993,10 +3080,14 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -3076,8 +3167,6 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3089,12 +3178,15 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
@@ -3181,8 +3273,6 @@ define inreg <4 x i32> @bitcast_v8bf16_to_v4i32_scalar(<8 x bfloat> inreg %a, i3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3535,6 +3625,7 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -3596,12 +3687,15 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
+; SI-NEXT: s_branch .LBB25_3
;
; VI-LABEL: bitcast_v4i32_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB25_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
@@ -3665,12 +3759,15 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr12
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB25_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB25_2
+; VI-NEXT: s_branch .LBB25_3
;
; GFX9-LABEL: bitcast_v4i32_to_v16i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
@@ -3734,15 +3831,18 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr12
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB25_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB25_2
+; GFX9-NEXT: s_branch .LBB25_3
;
; GFX11-LABEL: bitcast_v4i32_to_v16i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s18, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[2:3], 24
; GFX11-NEXT: s_lshr_b32 s8, s3, 24
; GFX11-NEXT: s_lshr_b32 s9, s3, 16
; GFX11-NEXT: s_lshr_b32 s10, s3, 8
@@ -3753,10 +3853,8 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s15, s1, 8
; GFX11-NEXT: s_lshr_b32 s16, s0, 16
; GFX11-NEXT: s_lshr_b32 s17, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
+; GFX11-NEXT: s_cbranch_execnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
@@ -3798,7 +3896,9 @@ define inreg <16 x i8> @bitcast_v4i32_to_v16i8_scalar(<4 x i32> inreg %a, i32 in
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB25_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB25_2
+; GFX11-NEXT: s_branch .LBB25_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4386,8 +4486,9 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v4, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v1
; SI-NEXT: s_cbranch_scc0 .LBB27_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -4489,15 +4590,18 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB27_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB27_2
+; SI-NEXT: s_branch .LBB27_3
;
; VI-LABEL: bitcast_v16i8_to_v4i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_readfirstlane_b32 s10, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB27_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -4598,15 +4702,18 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB27_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB27_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB27_2
+; VI-NEXT: s_branch .LBB27_3
;
; GFX9-LABEL: bitcast_v16i8_to_v4i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -4707,13 +4814,15 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB27_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB27_2
+; GFX9-NEXT: s_branch .LBB27_3
;
; GFX11-LABEL: bitcast_v16i8_to_v4i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -4724,10 +4833,10 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s16, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
@@ -4736,24 +4845,23 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 16
+; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_cbranch_execnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -4813,7 +4921,9 @@ define inreg <4 x i32> @bitcast_v16i8_to_v4i32_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB27_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB27_2
+; GFX11-NEXT: s_branch .LBB27_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4916,17 +5026,19 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
-; SI-NEXT: .LBB29_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB29_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB29_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -4938,17 +5050,19 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4960,17 +5074,19 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4982,19 +5098,20 @@ define inreg <2 x i64> @bitcast_v4f32_to_v2i64_scalar(<4 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5104,86 +5221,93 @@ define inreg <4 x float> @bitcast_v2i64_to_v4f32_scalar(<2 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB31_3
-; SI-NEXT: .LBB31_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB31_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB31_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s18, s18, 3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB31_3: ; %end
+; SI-NEXT: .LBB31_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v2i64_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v2i64_to_v4f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_3
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB31_3: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v2i64_to_v4f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB31_3: ; %end
+; GFX11-NEXT: .LBB31_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5286,17 +5410,19 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
-; SI-NEXT: .LBB33_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB33_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB33_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -5308,17 +5434,19 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5330,17 +5458,19 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5352,19 +5482,20 @@ define inreg <2 x double> @bitcast_v4f32_to_v2f64_scalar(<4 x float> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5468,15 +5599,17 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
-; SI-NEXT: .LBB35_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB35_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB35_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -5488,15 +5621,17 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5508,15 +5643,17 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5528,17 +5665,18 @@ define inreg <4 x float> @bitcast_v2f64_to_v4f32_scalar(<2 x double> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5663,6 +5801,7 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB37_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -5687,7 +5826,8 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB37_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB37_2
; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -5701,17 +5841,19 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5723,17 +5865,19 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5745,19 +5889,20 @@ define inreg <8 x i16> @bitcast_v4f32_to_v8i16_scalar(<4 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5919,6 +6064,7 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB39_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -5963,16 +6109,22 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB39_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB39_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB39_2
+; SI-NEXT: s_branch .LBB39_3
;
; VI-LABEL: bitcast_v8i16_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s19, 3
; VI-NEXT: s_and_b32 s4, s19, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -5993,30 +6145,30 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v8i16_to_v4f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6028,19 +6180,20 @@ define inreg <4 x float> @bitcast_v8i16_to_v4f32_scalar(<8 x i16> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6195,6 +6348,7 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s19, 16
@@ -6238,23 +6392,27 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v4f32_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6266,17 +6424,19 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6288,19 +6448,20 @@ define inreg <8 x half> @bitcast_v4f32_to_v8f16_scalar(<4 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6489,6 +6650,7 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v5, s23
; SI-NEXT: v_cvt_f16_f32_e32 v4, s22
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v11
@@ -6537,16 +6699,22 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v8f16_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -6569,8 +6737,6 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6582,18 +6748,20 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6605,19 +6773,20 @@ define inreg <4 x float> @bitcast_v8f16_to_v4f32_scalar(<8 x half> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6764,6 +6933,7 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s19, 0xffff0000
@@ -6798,7 +6968,8 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v0, s13
; SI-NEXT: v_mov_b32_e32 v1, s12
@@ -6814,17 +6985,19 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6836,17 +7009,19 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6858,19 +7033,20 @@ define inreg <8 x bfloat> @bitcast_v4f32_to_v8bf16_scalar(<4 x float> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7314,6 +7490,7 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s23
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s22
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v10
@@ -7354,16 +7531,22 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v8bf16_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -7438,8 +7621,6 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v4, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7451,10 +7632,14 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -7534,8 +7719,6 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_and_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7547,12 +7730,15 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
@@ -7639,8 +7825,6 @@ define inreg <4 x float> @bitcast_v8bf16_to_v4f32_scalar(<8 x bfloat> inreg %a,
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7989,6 +8173,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -8037,7 +8222,8 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v4, s17
@@ -8055,6 +8241,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
@@ -8101,7 +8288,8 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr13
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v19, s17
@@ -8130,6 +8318,7 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
@@ -8176,7 +8365,8 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
@@ -8205,23 +8395,22 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s9, s3, 24
-; GFX11-NEXT: s_lshr_b32 s10, s3, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 8
-; GFX11-NEXT: s_lshr_b32 s11, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s2, 8
-; GFX11-NEXT: s_lshr_b32 s14, s1, 24
-; GFX11-NEXT: s_lshr_b32 s15, s1, 16
-; GFX11-NEXT: s_lshr_b32 s17, s1, 8
-; GFX11-NEXT: s_lshr_b32 s16, s0, 16
-; GFX11-NEXT: s_lshr_b32 s18, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_lshr_b32 s8, s3, 24
+; GFX11-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-NEXT: s_lshr_b32 s11, s3, 8
+; GFX11-NEXT: s_lshr_b32 s10, s2, 16
+; GFX11-NEXT: s_lshr_b32 s12, s2, 8
+; GFX11-NEXT: s_lshr_b32 s13, s1, 24
+; GFX11-NEXT: s_lshr_b32 s14, s1, 16
+; GFX11-NEXT: s_lshr_b32 s16, s1, 8
+; GFX11-NEXT: s_lshr_b32 s15, s0, 16
+; GFX11-NEXT: s_lshr_b32 s17, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB49_4
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v19, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v17, s3, 1.0
@@ -8243,28 +8432,29 @@ define inreg <16 x i8> @bitcast_v4f32_to_v16i8_scalar(<4 x float> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
; GFX11-NEXT: s_branch .LBB49_5
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr17
; GFX11-NEXT: ; implicit-def: $sgpr15
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr16
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v19, s1
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v17, s3
-; GFX11-NEXT: v_dual_mov_b32 v1, s18 :: v_dual_mov_b32 v2, s16
-; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s15
-; GFX11-NEXT: v_dual_mov_b32 v7, s14 :: v_dual_mov_b32 v10, s11
-; GFX11-NEXT: v_dual_mov_b32 v9, s13 :: v_dual_mov_b32 v14, s10
-; GFX11-NEXT: v_mov_b32_e32 v13, s12
-; GFX11-NEXT: v_mov_b32_e32 v15, s9
+; GFX11-NEXT: v_dual_mov_b32 v1, s17 :: v_dual_mov_b32 v2, s15
+; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s14
+; GFX11-NEXT: v_dual_mov_b32 v7, s13 :: v_dual_mov_b32 v10, s10
+; GFX11-NEXT: v_dual_mov_b32 v9, s12 :: v_dual_mov_b32 v14, s9
+; GFX11-NEXT: v_mov_b32_e32 v13, s11
+; GFX11-NEXT: v_mov_b32_e32 v15, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
; GFX11-NEXT: .LBB49_5: ; %end
@@ -8860,8 +9050,9 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v4, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v1
; SI-NEXT: s_cbranch_scc0 .LBB51_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -8963,15 +9154,18 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v16i8_to_v4f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_readfirstlane_b32 s10, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -9072,15 +9266,18 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v16i8_to_v4f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -9181,13 +9378,15 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-LABEL: bitcast_v16i8_to_v4f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -9198,10 +9397,10 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s16, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
@@ -9210,24 +9409,23 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 16
+; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -9287,7 +9485,9 @@ define inreg <4 x float> @bitcast_v16i8_to_v4f32_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB51_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9393,85 +9593,92 @@ define inreg <2 x double> @bitcast_v2i64_to_v2f64_scalar(<2 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB53_3
-; SI-NEXT: .LBB53_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB53_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB53_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
; SI-NEXT: s_add_u32 s18, s18, 3
; SI-NEXT: s_addc_u32 s19, s19, 0
-; SI-NEXT: .LBB53_3: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v2i64_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v2i64_to_v2f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_3
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
-; GFX9-NEXT: .LBB53_3: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v2i64_to_v2f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
-; GFX11-NEXT: .LBB53_3: ; %end
+; GFX11-NEXT: .LBB53_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9571,15 +9778,17 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
-; SI-NEXT: .LBB55_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB55_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB55_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -9591,15 +9800,17 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -9611,15 +9822,17 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9631,17 +9844,18 @@ define inreg <2 x i64> @bitcast_v2f64_to_v2i64_scalar(<2 x double> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -9769,6 +9983,7 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -9802,71 +10017,78 @@ define inreg <8 x i16> @bitcast_v2i64_to_v8i16_scalar(<2 x i64> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v2i64_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v2i64_to_v8i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_3
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB57_3: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v2i64_to_v8i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB57_3: ; %end
+; GFX11-NEXT: .LBB57_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10024,6 +10246,7 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB59_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -10068,16 +10291,22 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB59_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB59_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB59_2
+; SI-NEXT: s_branch .LBB59_3
;
; VI-LABEL: bitcast_v8i16_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_3
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s19, 3
; VI-NEXT: s_and_b32 s4, s19, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -10098,30 +10327,30 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB59_3: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v8i16_to_v2i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10133,19 +10362,20 @@ define inreg <2 x i64> @bitcast_v8i16_to_v2i64_scalar(<8 x i16> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -10303,6 +10533,7 @@ define inreg <8 x half> @bitcast_v2i64_to_v8f16_scalar(<2 x i64> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB61_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s19, 16
@@ -10346,71 +10577,78 @@ define inreg <8 x half> @bitcast_v2i64_to_v8f16_scalar(<2 x i64> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB61_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB61_2
+; SI-NEXT: s_branch .LBB61_3
;
; VI-LABEL: bitcast_v2i64_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB61_3
-; VI-NEXT: .LBB61_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB61_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB61_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB61_3: ; %end
+; VI-NEXT: .LBB61_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v2i64_to_v8f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB61_3
-; GFX9-NEXT: .LBB61_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB61_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB61_3: ; %end
+; GFX9-NEXT: .LBB61_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v2i64_to_v8f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB61_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
-; GFX11-NEXT: .LBB61_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB61_3: ; %end
+; GFX11-NEXT: .LBB61_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10595,6 +10833,7 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v5, s23
; SI-NEXT: v_cvt_f16_f32_e32 v4, s22
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB63_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v11
@@ -10643,16 +10882,22 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB63_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB63_2
+; SI-NEXT: s_branch .LBB63_3
;
; VI-LABEL: bitcast_v8f16_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
-; VI-NEXT: .LBB63_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB63_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB63_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -10675,8 +10920,6 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10688,18 +10931,20 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
-; GFX9-NEXT: .LBB63_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB63_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10711,19 +10956,20 @@ define inreg <2 x i64> @bitcast_v8f16_to_v2i64_scalar(<8 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB63_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -10873,6 +11119,7 @@ define inreg <8 x bfloat> @bitcast_v2i64_to_v8bf16_scalar(<2 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB65_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s19, 0xffff0000
@@ -10916,71 +11163,78 @@ define inreg <8 x bfloat> @bitcast_v2i64_to_v8bf16_scalar(<2 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB65_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB65_2
+; SI-NEXT: s_branch .LBB65_3
;
; VI-LABEL: bitcast_v2i64_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_3
-; VI-NEXT: .LBB65_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB65_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB65_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB65_3: ; %end
+; VI-NEXT: .LBB65_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v2i64_to_v8bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_3
-; GFX9-NEXT: .LBB65_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB65_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB65_3: ; %end
+; GFX9-NEXT: .LBB65_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v2i64_to_v8bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB65_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
-; GFX11-NEXT: .LBB65_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB65_3: ; %end
+; GFX11-NEXT: .LBB65_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11420,6 +11674,7 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s23
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s22
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB67_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v10
@@ -11460,16 +11715,22 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB67_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB67_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB67_2
+; SI-NEXT: s_branch .LBB67_3
;
; VI-LABEL: bitcast_v8bf16_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
-; VI-NEXT: .LBB67_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB67_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB67_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -11544,8 +11805,6 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v4, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -11557,10 +11816,14 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
-; GFX9-NEXT: .LBB67_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB67_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -11640,8 +11903,6 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -11653,12 +11914,15 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB67_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
@@ -11745,8 +12009,6 @@ define inreg <2 x i64> @bitcast_v8bf16_to_v2i64_scalar(<8 x bfloat> inreg %a, i3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
; GFX11-NEXT: .LBB67_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -12099,6 +12361,7 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB69_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -12160,12 +12423,15 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB69_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB69_2
+; SI-NEXT: s_branch .LBB69_3
;
; VI-LABEL: bitcast_v2i64_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB69_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
@@ -12229,12 +12495,15 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr12
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB69_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB69_2
+; VI-NEXT: s_branch .LBB69_3
;
; GFX9-LABEL: bitcast_v2i64_to_v16i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
@@ -12298,15 +12567,18 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr12
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB69_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB69_2
+; GFX9-NEXT: s_branch .LBB69_3
;
; GFX11-LABEL: bitcast_v2i64_to_v16i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s18, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[2:3], 24
; GFX11-NEXT: s_lshr_b32 s8, s3, 24
; GFX11-NEXT: s_lshr_b32 s9, s3, 16
; GFX11-NEXT: s_lshr_b32 s10, s3, 8
@@ -12317,10 +12589,8 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s15, s1, 8
; GFX11-NEXT: s_lshr_b32 s16, s0, 16
; GFX11-NEXT: s_lshr_b32 s17, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
-; GFX11-NEXT: s_cbranch_vccnz .LBB69_3
+; GFX11-NEXT: s_cbranch_execnz .LBB69_3
; GFX11-NEXT: .LBB69_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
@@ -12362,7 +12632,9 @@ define inreg <16 x i8> @bitcast_v2i64_to_v16i8_scalar(<2 x i64> inreg %a, i32 in
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB69_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB69_2
+; GFX11-NEXT: s_branch .LBB69_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12950,8 +13222,9 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v4, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v1
; SI-NEXT: s_cbranch_scc0 .LBB71_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -13053,15 +13326,18 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB71_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB71_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB71_2
+; SI-NEXT: s_branch .LBB71_3
;
; VI-LABEL: bitcast_v16i8_to_v2i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_readfirstlane_b32 s10, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB71_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -13162,15 +13438,18 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB71_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB71_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB71_2
+; VI-NEXT: s_branch .LBB71_3
;
; GFX9-LABEL: bitcast_v16i8_to_v2i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -13271,13 +13550,15 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB71_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB71_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB71_2
+; GFX9-NEXT: s_branch .LBB71_3
;
; GFX11-LABEL: bitcast_v16i8_to_v2i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -13288,10 +13569,10 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s16, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
@@ -13300,24 +13581,23 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 16
+; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_cbranch_execnz .LBB71_3
; GFX11-NEXT: .LBB71_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -13377,7 +13657,9 @@ define inreg <2 x i64> @bitcast_v16i8_to_v2i64_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB71_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB71_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB71_2
+; GFX11-NEXT: s_branch .LBB71_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13499,6 +13781,7 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB73_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -13521,7 +13804,8 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB73_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB73_2
; SI-NEXT: .LBB73_4:
; SI-NEXT: v_mov_b32_e32 v11, s17
; SI-NEXT: v_mov_b32_e32 v9, s19
@@ -13540,15 +13824,17 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
-; VI-NEXT: .LBB73_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB73_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB73_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -13560,15 +13846,17 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
-; GFX9-NEXT: .LBB73_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB73_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13580,17 +13868,18 @@ define inreg <8 x i16> @bitcast_v2f64_to_v8i16_scalar(<2 x double> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB73_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
-; GFX11-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -13752,6 +14041,7 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB75_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -13796,16 +14086,22 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB75_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; SI-NEXT: s_branch .LBB75_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB75_2
+; SI-NEXT: s_branch .LBB75_3
;
; VI-LABEL: bitcast_v8i16_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB75_3
-; VI-NEXT: .LBB75_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB75_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB75_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s19, 3
; VI-NEXT: s_and_b32 s4, s19, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -13826,30 +14122,30 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB75_3: ; %end
+; VI-NEXT: .LBB75_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v8i16_to_v2f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
-; GFX9-NEXT: .LBB75_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB75_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: s_branch .LBB75_2
; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13861,19 +14157,20 @@ define inreg <2 x double> @bitcast_v8i16_to_v2f64_scalar(<8 x i16> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB75_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
-; GFX11-NEXT: .LBB75_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -14019,6 +14316,7 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB77_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s19, 16
@@ -14060,21 +14358,25 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB77_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB77_2
+; SI-NEXT: s_branch .LBB77_3
;
; VI-LABEL: bitcast_v2f64_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
-; VI-NEXT: .LBB77_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB77_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB77_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14086,15 +14388,17 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
-; GFX9-NEXT: .LBB77_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB77_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14106,17 +14410,18 @@ define inreg <8 x half> @bitcast_v2f64_to_v8f16_scalar(<2 x double> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB77_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
-; GFX11-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -14305,6 +14610,7 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; SI-NEXT: v_cvt_f16_f32_e32 v5, s23
; SI-NEXT: v_cvt_f16_f32_e32 v4, s22
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB79_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v11
@@ -14353,16 +14659,22 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB79_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB79_2
+; SI-NEXT: s_branch .LBB79_3
;
; VI-LABEL: bitcast_v8f16_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
-; VI-NEXT: .LBB79_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB79_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB79_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -14385,8 +14697,6 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v4
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14398,18 +14708,20 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
-; GFX9-NEXT: .LBB79_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB79_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14421,19 +14733,20 @@ define inreg <2 x double> @bitcast_v8f16_to_v2f64_scalar(<8 x half> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB79_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
-; GFX11-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -14570,6 +14883,7 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB81_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s13, s19, 0xffff0000
@@ -14602,7 +14916,8 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $sgpr12
; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: s_branch .LBB81_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB81_2
; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v7, s13
; SI-NEXT: v_mov_b32_e32 v6, s12
@@ -14618,15 +14933,17 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
-; VI-NEXT: .LBB81_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB81_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB81_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14638,15 +14955,17 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
-; GFX9-NEXT: .LBB81_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB81_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14658,17 +14977,18 @@ define inreg <8 x bfloat> @bitcast_v2f64_to_v8bf16_scalar(<2 x double> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB81_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
-; GFX11-NEXT: .LBB81_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -15112,6 +15432,7 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s23
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s22
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB83_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v10
@@ -15152,16 +15473,22 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB83_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB83_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB83_2
+; SI-NEXT: s_branch .LBB83_3
;
; VI-LABEL: bitcast_v8bf16_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
-; VI-NEXT: .LBB83_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB83_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB83_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s19, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -15236,8 +15563,6 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v4, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -15249,10 +15574,14 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
-; GFX9-NEXT: .LBB83_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB83_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s19
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -15332,8 +15661,6 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX9-NEXT: v_and_b32_sdwa v0, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v5, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -15345,12 +15672,15 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB83_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s4, s3, 16
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s3
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
@@ -15437,8 +15767,6 @@ define inreg <2 x double> @bitcast_v8bf16_to_v2f64_scalar(<8 x bfloat> inreg %a,
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshl_or_b32 v0, v7, 16, v8
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
; GFX11-NEXT: .LBB83_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -15783,6 +16111,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB85_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -15829,7 +16158,8 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB85_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB85_2
; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v19, s17
; SI-NEXT: v_mov_b32_e32 v17, s19
@@ -15852,6 +16182,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB85_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s15, s19, 24
@@ -15896,7 +16227,8 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr13
; VI-NEXT: ; implicit-def: $sgpr14
; VI-NEXT: ; implicit-def: $sgpr15
-; VI-NEXT: s_branch .LBB85_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB85_2
; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v16, s18
@@ -15925,6 +16257,7 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s15, s19, 24
@@ -15969,7 +16302,8 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr14
; GFX9-NEXT: ; implicit-def: $sgpr15
-; GFX9-NEXT: s_branch .LBB85_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB85_2
; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -15998,23 +16332,22 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s14, s3, 24
-; GFX11-NEXT: s_lshr_b32 s13, s3, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 8
-; GFX11-NEXT: s_lshr_b32 s16, s2, 16
-; GFX11-NEXT: s_lshr_b32 s15, s2, 8
-; GFX11-NEXT: s_lshr_b32 s11, s1, 24
-; GFX11-NEXT: s_lshr_b32 s10, s1, 16
-; GFX11-NEXT: s_lshr_b32 s9, s1, 8
-; GFX11-NEXT: s_lshr_b32 s18, s0, 16
-; GFX11-NEXT: s_lshr_b32 s17, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_lshr_b32 s13, s3, 24
+; GFX11-NEXT: s_lshr_b32 s12, s3, 16
+; GFX11-NEXT: s_lshr_b32 s11, s3, 8
+; GFX11-NEXT: s_lshr_b32 s15, s2, 16
+; GFX11-NEXT: s_lshr_b32 s14, s2, 8
+; GFX11-NEXT: s_lshr_b32 s10, s1, 24
+; GFX11-NEXT: s_lshr_b32 s9, s1, 16
+; GFX11-NEXT: s_lshr_b32 s8, s1, 8
+; GFX11-NEXT: s_lshr_b32 s17, s0, 16
+; GFX11-NEXT: s_lshr_b32 s16, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB85_4
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[16:17], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], s[0:1], 1.0
@@ -16033,30 +16366,31 @@ define inreg <16 x i8> @bitcast_v2f64_to_v16i8_scalar(<2 x double> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
; GFX11-NEXT: s_branch .LBB85_5
; GFX11-NEXT: .LBB85_3:
+; GFX11-NEXT: ; implicit-def: $sgpr16
; GFX11-NEXT: ; implicit-def: $sgpr17
-; GFX11-NEXT: ; implicit-def: $sgpr18
; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr16
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB85_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB85_2
; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v17, s3
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v19, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s18 :: v_dual_mov_b32 v1, s17
-; GFX11-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v9, s15
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v14, s13
-; GFX11-NEXT: v_dual_mov_b32 v11, s6 :: v_dual_mov_b32 v6, s10
-; GFX11-NEXT: v_mov_b32_e32 v15, s14
-; GFX11-NEXT: v_mov_b32_e32 v13, s12
-; GFX11-NEXT: v_mov_b32_e32 v7, s11
-; GFX11-NEXT: v_mov_b32_e32 v5, s9
+; GFX11-NEXT: v_dual_mov_b32 v2, s17 :: v_dual_mov_b32 v1, s16
+; GFX11-NEXT: v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v9, s14
+; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v14, s12
+; GFX11-NEXT: v_dual_mov_b32 v11, s6 :: v_dual_mov_b32 v6, s9
+; GFX11-NEXT: v_mov_b32_e32 v15, s13
+; GFX11-NEXT: v_mov_b32_e32 v13, s11
+; GFX11-NEXT: v_mov_b32_e32 v7, s10
+; GFX11-NEXT: v_mov_b32_e32 v5, s8
; GFX11-NEXT: .LBB85_5: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v18
; GFX11-NEXT: v_mov_b32_e32 v4, v19
@@ -16650,8 +16984,9 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v4, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v4, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v1
; SI-NEXT: s_cbranch_scc0 .LBB87_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -16753,15 +17088,18 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB87_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
-; SI-NEXT: s_branch .LBB87_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB87_2
+; SI-NEXT: s_branch .LBB87_3
;
; VI-LABEL: bitcast_v16i8_to_v2f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_readfirstlane_b32 s10, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB87_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -16862,15 +17200,18 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB87_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB87_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB87_2
+; VI-NEXT: s_branch .LBB87_3
;
; GFX9-LABEL: bitcast_v16i8_to_v2f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -16971,13 +17312,15 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB87_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB87_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB87_2
+; GFX9-NEXT: s_branch .LBB87_3
;
; GFX11-LABEL: bitcast_v16i8_to_v2f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB87_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -16988,10 +17331,10 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s16, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
@@ -17000,24 +17343,23 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 16
+; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB87_3
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_cbranch_execnz .LBB87_3
; GFX11-NEXT: .LBB87_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -17077,7 +17419,9 @@ define inreg <2 x double> @bitcast_v16i8_to_v2f64_scalar(<16 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB87_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB87_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB87_2
+; GFX11-NEXT: s_branch .LBB87_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17244,6 +17588,7 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB89_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -17283,16 +17628,22 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB89_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB89_2
+; SI-NEXT: s_branch .LBB89_3
;
; VI-LABEL: bitcast_v8i16_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB89_3
-; VI-NEXT: .LBB89_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB89_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB89_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_add_i32 s9, s18, 3
@@ -17313,30 +17664,30 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB89_3: ; %end
+; VI-NEXT: .LBB89_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v8i16_to_v8f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_4
-; GFX9-NEXT: .LBB89_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB89_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB89_3:
-; GFX9-NEXT: s_branch .LBB89_2
; GFX9-NEXT: .LBB89_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -17348,19 +17699,20 @@ define inreg <8 x half> @bitcast_v8i16_to_v8f16_scalar(<8 x i16> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB89_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
-; GFX11-NEXT: .LBB89_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -17527,10 +17879,14 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v6, s22
; SI-NEXT: v_cvt_f16_f32_e32 v7, s23
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB91_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_3
-; SI-NEXT: .LBB91_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB91_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB91_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
@@ -17565,19 +17921,21 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v4, v4, v5
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; SI-NEXT: .LBB91_3: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB91_4:
-; SI-NEXT: s_branch .LBB91_2
;
; VI-LABEL: bitcast_v8f16_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_4
-; VI-NEXT: .LBB91_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB91_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB91_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -17600,8 +17958,6 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v1, v1, v6
; VI-NEXT: v_or_b32_e32 v0, v4, v5
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB91_3:
-; VI-NEXT: s_branch .LBB91_2
; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -17613,18 +17969,20 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
-; GFX9-NEXT: .LBB91_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB91_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: s_branch .LBB91_2
; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -17636,19 +17994,20 @@ define inreg <8 x i16> @bitcast_v8f16_to_v8i16_scalar(<8 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB91_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
-; GFX11-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -17816,6 +18175,7 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB93_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s6, s16, 16
@@ -17875,16 +18235,22 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $sgpr13
; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB93_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB93_2
+; SI-NEXT: s_branch .LBB93_3
;
; VI-LABEL: bitcast_v8i16_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB93_3
-; VI-NEXT: .LBB93_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB93_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB93_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_add_i32 s9, s18, 3
@@ -17905,30 +18271,30 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB93_3: ; %end
+; VI-NEXT: .LBB93_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v8i16_to_v8bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_4
-; GFX9-NEXT: .LBB93_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB93_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB93_3:
-; GFX9-NEXT: s_branch .LBB93_2
; GFX9-NEXT: .LBB93_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -17940,19 +18306,20 @@ define inreg <8 x bfloat> @bitcast_v8i16_to_v8bf16_scalar(<8 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB93_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
-; GFX11-NEXT: .LBB93_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -18414,6 +18781,7 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v12, 1.0, s21
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v10, 1.0, s23
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB95_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v15
@@ -18465,16 +18833,22 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB95_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB95_2
+; SI-NEXT: s_branch .LBB95_3
;
; VI-LABEL: bitcast_v8bf16_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
-; VI-NEXT: .LBB95_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB95_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB95_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -18549,8 +18923,6 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: v_alignbit_b32 v1, v6, v1, 16
; VI-NEXT: v_alignbit_b32 v0, v5, v4, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -18562,10 +18934,14 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
-; GFX9-NEXT: .LBB95_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB95_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -18641,8 +19017,6 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v5
; GFX9-NEXT: v_and_or_b32 v0, v4, v8, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -18654,12 +19028,15 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB95_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB95_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
@@ -18742,8 +19119,6 @@ define inreg <8 x i16> @bitcast_v8bf16_to_v8i16_scalar(<8 x bfloat> inreg %a, i3
; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v6, v8
; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
; GFX11-NEXT: .LBB95_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -19152,6 +19527,7 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB97_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -19245,12 +19621,15 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr13
; SI-NEXT: ; implicit-def: $sgpr14
; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: s_branch .LBB97_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB97_2
+; SI-NEXT: s_branch .LBB97_3
;
; VI-LABEL: bitcast_v8i16_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB97_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
@@ -19330,12 +19709,15 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr12
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB97_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB97_2
+; VI-NEXT: s_branch .LBB97_3
;
; GFX9-LABEL: bitcast_v8i16_to_v16i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB97_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
@@ -19382,7 +19764,8 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB97_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB97_2
; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
@@ -19411,23 +19794,22 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s9, s3, 24
-; GFX11-NEXT: s_lshr_b32 s10, s3, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 8
-; GFX11-NEXT: s_lshr_b32 s11, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s2, 8
-; GFX11-NEXT: s_lshr_b32 s14, s1, 24
-; GFX11-NEXT: s_lshr_b32 s15, s1, 16
-; GFX11-NEXT: s_lshr_b32 s17, s1, 8
-; GFX11-NEXT: s_lshr_b32 s16, s0, 16
-; GFX11-NEXT: s_lshr_b32 s18, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_lshr_b32 s8, s3, 24
+; GFX11-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-NEXT: s_lshr_b32 s11, s3, 8
+; GFX11-NEXT: s_lshr_b32 s10, s2, 16
+; GFX11-NEXT: s_lshr_b32 s12, s2, 8
+; GFX11-NEXT: s_lshr_b32 s13, s1, 24
+; GFX11-NEXT: s_lshr_b32 s14, s1, 16
+; GFX11-NEXT: s_lshr_b32 s16, s1, 8
+; GFX11-NEXT: s_lshr_b32 s15, s0, 16
+; GFX11-NEXT: s_lshr_b32 s17, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB97_4
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v19, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s3, 3 op_sel_hi:[1,0]
@@ -19449,28 +19831,29 @@ define inreg <16 x i8> @bitcast_v8i16_to_v16i8_scalar(<8 x i16> inreg %a, i32 in
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
; GFX11-NEXT: s_branch .LBB97_5
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr17
; GFX11-NEXT: ; implicit-def: $sgpr15
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr16
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: s_branch .LBB97_2
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB97_2
; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v19, s1
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v17, s3
-; GFX11-NEXT: v_dual_mov_b32 v1, s18 :: v_dual_mov_b32 v2, s16
-; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s15
-; GFX11-NEXT: v_dual_mov_b32 v7, s14 :: v_dual_mov_b32 v10, s11
-; GFX11-NEXT: v_dual_mov_b32 v9, s13 :: v_dual_mov_b32 v14, s10
-; GFX11-NEXT: v_mov_b32_e32 v13, s12
-; GFX11-NEXT: v_mov_b32_e32 v15, s9
+; GFX11-NEXT: v_dual_mov_b32 v1, s17 :: v_dual_mov_b32 v2, s15
+; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s14
+; GFX11-NEXT: v_dual_mov_b32 v7, s13 :: v_dual_mov_b32 v10, s10
+; GFX11-NEXT: v_dual_mov_b32 v9, s12 :: v_dual_mov_b32 v14, s9
+; GFX11-NEXT: v_mov_b32_e32 v13, s11
+; GFX11-NEXT: v_mov_b32_e32 v15, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
; GFX11-NEXT: .LBB97_5: ; %end
@@ -20079,9 +20462,10 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s8, v0
+; SI-NEXT: v_readfirstlane_b32 s7, v1
+; SI-NEXT: v_readfirstlane_b32 s6, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB99_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s20, 0xff
@@ -20089,20 +20473,20 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s22, 0xff
; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s7, s23, 24
+; SI-NEXT: s_lshl_b32 s8, s23, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s7, s5
+; SI-NEXT: s_or_b32 s5, s8, s5
; SI-NEXT: s_or_b32 s10, s4, s5
; SI-NEXT: s_and_b32 s4, s18, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
-; SI-NEXT: s_lshl_b32 s7, s19, 24
-; SI-NEXT: s_or_b32 s4, s7, s4
-; SI-NEXT: s_and_b32 s7, s28, 0xff
+; SI-NEXT: s_lshl_b32 s8, s19, 24
+; SI-NEXT: s_or_b32 s4, s8, s4
+; SI-NEXT: s_and_b32 s8, s28, 0xff
; SI-NEXT: s_lshl_b32 s9, s29, 8
-; SI-NEXT: s_or_b32 s7, s7, s9
-; SI-NEXT: s_and_b32 s9, s8, 0xff
+; SI-NEXT: s_or_b32 s8, s8, s9
+; SI-NEXT: s_and_b32 s9, s6, 0xff
; SI-NEXT: s_lshl_b32 s9, s9, 16
-; SI-NEXT: s_lshl_b32 s11, s6, 24
+; SI-NEXT: s_lshl_b32 s11, s7, 24
; SI-NEXT: s_or_b32 s13, s11, s9
; SI-NEXT: s_and_b32 s9, s26, 0xff
; SI-NEXT: s_lshl_b32 s9, s9, 16
@@ -20116,13 +20500,13 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: s_or_b32 s11, s11, s4
; SI-NEXT: s_and_b32 s4, s24, 0xff
; SI-NEXT: s_lshl_b32 s12, s25, 8
-; SI-NEXT: s_and_b32 s7, s7, 0xffff
+; SI-NEXT: s_and_b32 s8, s8, 0xffff
; SI-NEXT: s_or_b32 s4, s4, s12
; SI-NEXT: v_alignbit_b32 v1, s10, v0, 16
-; SI-NEXT: s_or_b32 s7, s7, s13
+; SI-NEXT: s_or_b32 s8, s8, s13
; SI-NEXT: v_mov_b32_e32 v0, s9
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_alignbit_b32 v5, s7, v0, 16
+; SI-NEXT: v_alignbit_b32 v5, s8, v0, 16
; SI-NEXT: s_or_b32 s9, s4, s9
; SI-NEXT: s_lshr_b32 s12, s5, 16
; SI-NEXT: s_lshr_b32 s13, s13, 16
@@ -20133,28 +20517,28 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: s_lshl_b32 s5, s25, 8
; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_and_b32 s7, s26, 0xff
+; SI-NEXT: s_and_b32 s8, s26, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_lshl_b32 s5, s27, 24
-; SI-NEXT: s_lshl_b32 s7, s7, 16
+; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s5, s7
+; SI-NEXT: s_or_b32 s5, s5, s8
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_add_i32 s9, s4, 0x3000000
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
-; SI-NEXT: s_add_i32 s8, s8, 3
+; SI-NEXT: s_add_i32 s6, s6, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_lshl_b32 s5, s6, 24
-; SI-NEXT: s_and_b32 s6, s8, 0xff
+; SI-NEXT: s_and_b32 s6, s6, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
+; SI-NEXT: s_lshl_b32 s5, s7, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: s_add_i32 s7, s4, 0x3000000
+; SI-NEXT: s_add_i32 s8, s4, 0x3000000
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
; SI-NEXT: s_add_i32 s18, s18, 3
@@ -20183,15 +20567,15 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v0, s11
; SI-NEXT: v_alignbit_b32 v1, s10, v0, 16
; SI-NEXT: v_mov_b32_e32 v0, s9
-; SI-NEXT: v_alignbit_b32 v5, s7, v0, 16
+; SI-NEXT: v_alignbit_b32 v5, s8, v0, 16
; SI-NEXT: s_lshr_b32 s12, s10, 16
-; SI-NEXT: s_lshr_b32 s13, s7, 16
+; SI-NEXT: s_lshr_b32 s13, s8, 16
; SI-NEXT: .LBB99_3: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s11
; SI-NEXT: v_mov_b32_e32 v2, s10
; SI-NEXT: v_mov_b32_e32 v3, s12
; SI-NEXT: v_mov_b32_e32 v4, s9
-; SI-NEXT: v_mov_b32_e32 v6, s7
+; SI-NEXT: v_mov_b32_e32 v6, s8
; SI-NEXT: v_mov_b32_e32 v7, s13
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB99_4:
@@ -20201,17 +20585,20 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr12
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: ; implicit-def: $sgpr7
+; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr13
-; SI-NEXT: s_branch .LBB99_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB99_2
+; SI-NEXT: s_branch .LBB99_3
;
; VI-LABEL: bitcast_v16i8_to_v8i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_readfirstlane_b32 s10, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB99_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -20312,15 +20699,18 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB99_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB99_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB99_2
+; VI-NEXT: s_branch .LBB99_3
;
; GFX9-LABEL: bitcast_v16i8_to_v8i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB99_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -20421,13 +20811,15 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB99_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB99_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB99_2
+; GFX9-NEXT: s_branch .LBB99_3
;
; GFX11-LABEL: bitcast_v16i8_to_v8i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB99_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -20438,10 +20830,10 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s16, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
@@ -20450,24 +20842,23 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 16
+; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_cbranch_execnz .LBB99_3
; GFX11-NEXT: .LBB99_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -20527,7 +20918,9 @@ define inreg <8 x i16> @bitcast_v16i8_to_v8i16_scalar(<16 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB99_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB99_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB99_2
+; GFX11-NEXT: s_branch .LBB99_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20719,6 +21112,7 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v14, s22
; SI-NEXT: v_cvt_f16_f32_e32 v15, s23
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB101_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v8
@@ -20774,16 +21168,22 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB101_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB101_2
+; SI-NEXT: s_branch .LBB101_3
;
; VI-LABEL: bitcast_v8f16_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
-; VI-NEXT: .LBB101_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB101_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB101_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -20806,8 +21206,6 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v1, v6
; VI-NEXT: v_or_b32_e32 v0, v4, v5
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -20819,18 +21217,20 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
-; GFX9-NEXT: .LBB101_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB101_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -20842,19 +21242,20 @@ define inreg <8 x bfloat> @bitcast_v8f16_to_v8bf16_scalar(<8 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB101_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
-; GFX11-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -21324,6 +21725,7 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s21
; SI-NEXT: v_mul_f32_e64 v14, 1.0, s22
; SI-NEXT: v_mul_f32_e64 v15, 1.0, s23
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB103_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v8
@@ -21387,16 +21789,22 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB103_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB103_2
+; SI-NEXT: s_branch .LBB103_3
;
; VI-LABEL: bitcast_v8bf16_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
-; VI-NEXT: .LBB103_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB103_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB103_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -21471,8 +21879,6 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; VI-NEXT: v_alignbit_b32 v1, v6, v1, 16
; VI-NEXT: v_alignbit_b32 v0, v5, v4, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -21484,10 +21890,14 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
-; GFX9-NEXT: .LBB103_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB103_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -21567,8 +21977,6 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX9-NEXT: v_and_b32_sdwa v0, v8, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -21580,12 +21988,15 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB103_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB103_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s4, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s4
@@ -21676,8 +22087,6 @@ define inreg <8 x half> @bitcast_v8bf16_to_v8f16_scalar(<8 x bfloat> inreg %a, i
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_lshl_or_b32 v1, v6, 16, v7
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
; GFX11-NEXT: .LBB103_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -22093,6 +22502,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v14, s23
; SI-NEXT: v_cvt_f16_f32_e32 v19, s22
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB105_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21
@@ -22174,12 +22584,15 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr12
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB105_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB105_2
+; SI-NEXT: s_branch .LBB105_3
;
; VI-LABEL: bitcast_v8f16_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB105_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s19, 24
@@ -22239,7 +22652,8 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr20
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB105_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB105_2
; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v2, s23
; VI-NEXT: v_mov_b32_e32 v6, s22
@@ -22266,6 +22680,7 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s10, s19, 24
@@ -22313,7 +22728,8 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB105_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB105_2
; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
@@ -22342,23 +22758,22 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s9, s3, 24
-; GFX11-NEXT: s_lshr_b32 s10, s3, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 8
-; GFX11-NEXT: s_lshr_b32 s11, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s2, 8
-; GFX11-NEXT: s_lshr_b32 s14, s1, 24
-; GFX11-NEXT: s_lshr_b32 s15, s1, 16
-; GFX11-NEXT: s_lshr_b32 s17, s1, 8
-; GFX11-NEXT: s_lshr_b32 s16, s0, 16
-; GFX11-NEXT: s_lshr_b32 s18, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_lshr_b32 s8, s3, 24
+; GFX11-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-NEXT: s_lshr_b32 s11, s3, 8
+; GFX11-NEXT: s_lshr_b32 s10, s2, 16
+; GFX11-NEXT: s_lshr_b32 s12, s2, 8
+; GFX11-NEXT: s_lshr_b32 s13, s1, 24
+; GFX11-NEXT: s_lshr_b32 s14, s1, 16
+; GFX11-NEXT: s_lshr_b32 s16, s1, 8
+; GFX11-NEXT: s_lshr_b32 s15, s0, 16
+; GFX11-NEXT: s_lshr_b32 s17, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB105_4
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v19, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s3 op_sel_hi:[0,1]
@@ -22380,28 +22795,29 @@ define inreg <16 x i8> @bitcast_v8f16_to_v16i8_scalar(<8 x half> inreg %a, i32 i
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v18
; GFX11-NEXT: s_branch .LBB105_5
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr17
; GFX11-NEXT: ; implicit-def: $sgpr15
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr16
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: s_branch .LBB105_2
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB105_2
; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v18, s0 :: v_dual_mov_b32 v19, s1
; GFX11-NEXT: v_dual_mov_b32 v16, s2 :: v_dual_mov_b32 v17, s3
-; GFX11-NEXT: v_dual_mov_b32 v1, s18 :: v_dual_mov_b32 v2, s16
-; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s15
-; GFX11-NEXT: v_dual_mov_b32 v7, s14 :: v_dual_mov_b32 v10, s11
-; GFX11-NEXT: v_dual_mov_b32 v9, s13 :: v_dual_mov_b32 v14, s10
-; GFX11-NEXT: v_mov_b32_e32 v13, s12
-; GFX11-NEXT: v_mov_b32_e32 v15, s9
+; GFX11-NEXT: v_dual_mov_b32 v1, s17 :: v_dual_mov_b32 v2, s15
+; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s14
+; GFX11-NEXT: v_dual_mov_b32 v7, s13 :: v_dual_mov_b32 v10, s10
+; GFX11-NEXT: v_dual_mov_b32 v9, s12 :: v_dual_mov_b32 v14, s9
+; GFX11-NEXT: v_mov_b32_e32 v13, s11
+; GFX11-NEXT: v_mov_b32_e32 v15, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
; GFX11-NEXT: .LBB105_5: ; %end
@@ -22993,9 +23409,10 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: v_readfirstlane_b32 s7, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB107_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -23091,15 +23508,18 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB107_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB107_2
+; SI-NEXT: s_branch .LBB107_3
;
; VI-LABEL: bitcast_v16i8_to_v8f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_readfirstlane_b32 s10, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB107_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -23200,15 +23620,18 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB107_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB107_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB107_2
+; VI-NEXT: s_branch .LBB107_3
;
; GFX9-LABEL: bitcast_v16i8_to_v8f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB107_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -23309,13 +23732,15 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB107_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB107_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB107_2
+; GFX9-NEXT: s_branch .LBB107_3
;
; GFX11-LABEL: bitcast_v16i8_to_v8f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB107_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -23326,10 +23751,10 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s16, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
@@ -23338,24 +23763,23 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 16
+; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_cbranch_execnz .LBB107_3
; GFX11-NEXT: .LBB107_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -23415,7 +23839,9 @@ define inreg <8 x half> @bitcast_v16i8_to_v8f16_scalar(<16 x i8> inreg %a, i32 i
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB107_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB107_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB107_2
+; GFX11-NEXT: s_branch .LBB107_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24098,6 +24524,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v23, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v20, 1.0, s23
; SI-NEXT: v_mul_f32_e64 v21, 1.0, s22
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB109_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v18
@@ -24173,12 +24600,15 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB109_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB109_2
+; SI-NEXT: s_branch .LBB109_3
;
; VI-LABEL: bitcast_v8bf16_to_v16i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB109_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s10, s19, 24
@@ -24294,7 +24724,8 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr13
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB109_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB109_2
; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v19, s17
@@ -24323,6 +24754,7 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s12, s19, 24
@@ -24444,7 +24876,8 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr23
; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB109_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB109_2
; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s18
; GFX9-NEXT: v_mov_b32_e32 v16, s19
@@ -24471,23 +24904,22 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s11, s3, 24
-; GFX11-NEXT: s_lshr_b32 s18, s3, 16
-; GFX11-NEXT: s_lshr_b32 s14, s3, 8
-; GFX11-NEXT: s_lshr_b32 s16, s2, 16
-; GFX11-NEXT: s_lshr_b32 s15, s2, 8
-; GFX11-NEXT: s_lshr_b32 s9, s1, 24
-; GFX11-NEXT: s_lshr_b32 s17, s1, 16
-; GFX11-NEXT: s_lshr_b32 s10, s1, 8
-; GFX11-NEXT: s_lshr_b32 s13, s0, 16
-; GFX11-NEXT: s_lshr_b32 s12, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-NEXT: s_lshr_b32 s10, s3, 24
+; GFX11-NEXT: s_lshr_b32 s17, s3, 16
+; GFX11-NEXT: s_lshr_b32 s13, s3, 8
+; GFX11-NEXT: s_lshr_b32 s15, s2, 16
+; GFX11-NEXT: s_lshr_b32 s14, s2, 8
+; GFX11-NEXT: s_lshr_b32 s8, s1, 24
+; GFX11-NEXT: s_lshr_b32 s16, s1, 16
+; GFX11-NEXT: s_lshr_b32 s9, s1, 8
+; GFX11-NEXT: s_lshr_b32 s12, s0, 16
+; GFX11-NEXT: s_lshr_b32 s11, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB109_4
; GFX11-NEXT: .LBB109_2: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-NEXT: s_pack_lh_b32_b16 s1, 0, s1
@@ -24586,27 +25018,28 @@ define inreg <16 x i8> @bitcast_v8bf16_to_v16i8_scalar(<8 x bfloat> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v9, 8, v9
; GFX11-NEXT: s_branch .LBB109_5
; GFX11-NEXT: .LBB109_3:
+; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr17
; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr16
-; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr18
-; GFX11-NEXT: ; implicit-def: $sgpr11
-; GFX11-NEXT: s_branch .LBB109_2
+; GFX11-NEXT: ; implicit-def: $sgpr15
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr13
+; GFX11-NEXT: ; implicit-def: $sgpr17
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB109_2
; GFX11-NEXT: .LBB109_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s1
-; GFX11-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s15
-; GFX11-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v15, s11
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s14
-; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v1, s12
-; GFX11-NEXT: v_dual_mov_b32 v10, s16 :: v_dual_mov_b32 v7, s9
-; GFX11-NEXT: v_dual_mov_b32 v2, s13 :: v_dual_mov_b32 v5, s10
+; GFX11-NEXT: v_dual_mov_b32 v16, s3 :: v_dual_mov_b32 v9, s14
+; GFX11-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s10
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v13, s13
+; GFX11-NEXT: v_dual_mov_b32 v6, s16 :: v_dual_mov_b32 v1, s11
+; GFX11-NEXT: v_dual_mov_b32 v10, s15 :: v_dual_mov_b32 v7, s8
+; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v5, s9
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
; GFX11-NEXT: .LBB109_5: ; %end
@@ -25212,9 +25645,10 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s11, v0
+; SI-NEXT: v_readfirstlane_b32 s11, v1
+; SI-NEXT: v_readfirstlane_b32 s10, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB111_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -25232,7 +25666,7 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI-NEXT: s_and_b32 s4, s22, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
; SI-NEXT: s_lshl_b32 s5, s23, 24
-; SI-NEXT: s_or_b32 s10, s5, s4
+; SI-NEXT: s_or_b32 s9, s5, s4
; SI-NEXT: s_and_b32 s4, s24, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
; SI-NEXT: s_lshl_b32 s5, s25, 24
@@ -25245,20 +25679,20 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_lshl_b32 s14, s4, 16
-; SI-NEXT: s_and_b32 s4, s11, 0xff
+; SI-NEXT: s_and_b32 s4, s10, 0xff
; SI-NEXT: s_lshl_b32 s4, s4, 16
-; SI-NEXT: s_lshl_b32 s5, s9, 24
+; SI-NEXT: s_lshl_b32 s5, s11, 24
; SI-NEXT: s_or_b32 s15, s5, s4
; SI-NEXT: s_cbranch_execnz .LBB111_3
; SI-NEXT: .LBB111_2: ; %cmp.true
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_and_b32 s4, s28, 0xff
; SI-NEXT: s_lshl_b32 s5, s29, 8
-; SI-NEXT: s_add_i32 s11, s11, 3
+; SI-NEXT: s_add_i32 s10, s10, 3
; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_and_b32 s6, s11, 0xff
+; SI-NEXT: s_and_b32 s6, s10, 0xff
; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s9, 24
+; SI-NEXT: s_lshl_b32 s5, s11, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
@@ -25305,7 +25739,7 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI-NEXT: s_add_i32 s6, s6, 0x3000000
; SI-NEXT: s_and_b32 s7, s6, 0xffff0000
; SI-NEXT: s_lshl_b32 s6, s6, 16
-; SI-NEXT: s_and_b32 s10, s8, 0xffff0000
+; SI-NEXT: s_and_b32 s9, s8, 0xffff0000
; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: s_and_b32 s13, s5, 0xffff0000
; SI-NEXT: s_lshl_b32 s12, s5, 16
@@ -25315,7 +25749,7 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v0, s6
; SI-NEXT: v_mov_b32_e32 v1, s7
; SI-NEXT: v_mov_b32_e32 v2, s8
-; SI-NEXT: v_mov_b32_e32 v3, s10
+; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: v_mov_b32_e32 v4, s12
; SI-NEXT: v_mov_b32_e32 v5, s13
; SI-NEXT: v_mov_b32_e32 v6, s14
@@ -25325,20 +25759,23 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: ; implicit-def: $sgpr10
+; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr12
; SI-NEXT: ; implicit-def: $sgpr13
; SI-NEXT: ; implicit-def: $sgpr14
; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: s_branch .LBB111_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB111_2
+; SI-NEXT: s_branch .LBB111_3
;
; VI-LABEL: bitcast_v16i8_to_v8bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; VI-NEXT: v_readfirstlane_b32 s10, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB111_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -25439,15 +25876,18 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB111_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB111_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB111_2
+; VI-NEXT: s_branch .LBB111_3
;
; GFX9-LABEL: bitcast_v16i8_to_v8bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; GFX9-NEXT: v_readfirstlane_b32 s10, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s11, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB111_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -25548,13 +25988,15 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB111_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB111_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB111_2
+; GFX9-NEXT: s_branch .LBB111_3
;
; GFX11-LABEL: bitcast_v16i8_to_v8bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB111_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -25565,10 +26007,10 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s16, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
@@ -25577,24 +26019,23 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s6, s20, 0xff
; GFX11-NEXT: s_lshl_b32 s7, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
-; GFX11-NEXT: s_and_b32 s11, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s27, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_and_b32 s10, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s27, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
-; GFX11-NEXT: s_lshl_b32 s10, s10, 16
+; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
+; GFX11-NEXT: s_lshl_b32 s9, s9, 16
; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s9, s10
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_cbranch_execnz .LBB111_3
; GFX11-NEXT: .LBB111_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -25654,7 +26095,9 @@ define inreg <8 x bfloat> @bitcast_v16i8_to_v8bf16_scalar(<16 x i8> inreg %a, i3
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB111_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB111_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB111_2
+; GFX11-NEXT: s_branch .LBB111_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll
index c87d52c..6b805da 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.160bit.ll
@@ -97,94 +97,101 @@ define inreg <5 x float> @bitcast_v5i32_to_v5f32_scalar(<5 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s20, s20, 3
; SI-NEXT: s_add_i32 s19, s19, 3
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v5i32_to_v5f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v5i32_to_v5f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v5i32_to_v5f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s16
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -291,18 +298,20 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -315,18 +324,20 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -339,18 +350,20 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -364,20 +377,21 @@ define inreg <5 x i32> @bitcast_v5f32_to_v5i32_scalar(<5 x float> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -518,6 +532,7 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s20
@@ -558,77 +573,84 @@ define inreg <10 x i16> @bitcast_v5i32_to_v10i16_scalar(<5 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_v5i32_to_v10i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v5i32_to_v10i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v5i32_to_v10i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s16
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -802,6 +824,7 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -855,16 +878,22 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v10i16_to_v5i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s20, 3
; VI-NEXT: s_and_b32 s4, s20, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -890,32 +919,32 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v10i16_to_v5i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -929,20 +958,21 @@ define inreg <5 x i32> @bitcast_v10i16_to_v5i32_scalar(<10 x i16> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1114,6 +1144,7 @@ define inreg <10 x half> @bitcast_v5i32_to_v10f16_scalar(<5 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s20, 16
@@ -1166,77 +1197,84 @@ define inreg <10 x half> @bitcast_v5i32_to_v10f16_scalar(<5 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_v5i32_to_v10f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v5i32_to_v10f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v5i32_to_v10f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_mov_b32_e32 v4, s16
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1442,6 +1480,7 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v6, s25
; SI-NEXT: v_cvt_f16_f32_e32 v5, s24
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v14
@@ -1500,16 +1539,22 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v10f16_to_v5i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s20, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -1537,8 +1582,6 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v5
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1551,10 +1594,14 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -1562,8 +1609,6 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1577,20 +1622,21 @@ define inreg <5 x i32> @bitcast_v10f16_to_v5i32_scalar(<10 x half> inreg %a, i32
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1729,6 +1775,7 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s20
@@ -1758,7 +1805,8 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -1773,18 +1821,20 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1800,18 +1850,20 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1828,20 +1880,21 @@ define inreg <10 x i16> @bitcast_v5f32_to_v10i16_scalar(<5 x float> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2021,6 +2074,7 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -2074,16 +2128,22 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v10i16_to_v5f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s20, 3
; VI-NEXT: s_and_b32 s4, s20, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -2109,32 +2169,32 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v10i16_to_v5f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2148,20 +2208,21 @@ define inreg <5 x float> @bitcast_v10i16_to_v5f32_scalar(<10 x i16> inreg %a, i3
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2331,6 +2392,7 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s21, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s20, 16
@@ -2383,24 +2445,28 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v5f32_to_v10f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2416,18 +2482,20 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2444,20 +2512,21 @@ define inreg <10 x half> @bitcast_v5f32_to_v10f16_scalar(<5 x float> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2669,6 +2738,7 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v6, s25
; SI-NEXT: v_cvt_f16_f32_e32 v5, s24
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v14
@@ -2727,16 +2797,22 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v10f16_to_v5f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s20, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2764,8 +2840,6 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v5
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2778,10 +2852,14 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -2789,8 +2867,6 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2804,20 +2880,21 @@ define inreg <5 x float> @bitcast_v10f16_to_v5f32_scalar(<10 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s5, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3006,6 +3083,7 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -3053,16 +3131,22 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v10i16_to_v10f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_add_i32 s9, s18, 3
@@ -3088,32 +3172,32 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v10i16_to_v10f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3130,20 +3214,21 @@ define inreg <10 x half> @bitcast_v10i16_to_v10f16_scalar(<10 x i16> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3329,10 +3414,14 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s24
; SI-NEXT: v_cvt_f16_f32_e32 v9, s25
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
@@ -3375,19 +3464,21 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v4, v4, v5
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v10f16_to_v10i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s21, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -3415,8 +3506,6 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v1, v7
; VI-NEXT: v_or_b32_e32 v0, v5, v6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3432,10 +3521,14 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s21, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v3, s19, v0 op_sel_hi:[1,0]
@@ -3443,8 +3536,6 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3461,20 +3552,21 @@ define inreg <10 x i16> @bitcast_v10f16_to_v10i16_scalar(<10 x half> inreg %a, i
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s17, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
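The hunks above repeat a single codegen change across the *_scalar bitcast tests: instead of guarding cmp.true with an s_cbranch_execnz fall-through and a dead `.LBBn_3: s_branch .LBBn_2` trampoline block, the compiler now threads an explicit mask through one shared %Flow block. A distilled sketch of the new shape follows; the labels and the s[4:5] mask pair are illustrative placeholders, not taken from any one function:

    s_cmp_lg_u32    s21, 0             ; scc = (b != 0)
    s_mov_b64       s[4:5], -1         ; seed flow mask: "cmp.true should run"
    s_cbranch_scc0  .LBB_FLOW          ; b == 0: leave the mask set
    ; %cmp.false
    s_mov_b64       s[4:5], 0          ; cmp.false ran: "skip cmp.true"
.LBB_FLOW:                             ; %Flow
    s_andn2_b64     vcc, exec, s[4:5]  ; vcc = exec & ~mask
    s_cbranch_vccnz .LBB_END           ; mask == 0  -> vcc = exec -> skip cmp.true
    ; %cmp.true                        ; mask == -1 -> vcc = 0    -> fall through
    ; ... recompute the adjusted values (+3 / +1.0 / +0x200) ...
.LBB_END:                              ; %end
    s_setpc_b64     s[30:31]

Seeding the mask with -1 before the branch is what makes the old trampoline block dead: every predecessor of %Flow now carries a well-defined mask, so the extra s_branch back into cmp.true can be deleted at the cost of one s_mov_b64 on the cmp.true path.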
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll
index 5344095..365674d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.16bit.ll
@@ -117,6 +117,7 @@ define inreg half @bitcast_i16_to_f16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_and_b32 s6, s16, 0xffff
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB1_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s6
@@ -128,53 +129,59 @@ define inreg half @bitcast_i16_to_f16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB1_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB1_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB1_2
+; SI-NEXT: s_branch .LBB1_3
;
; VI-LABEL: bitcast_i16_to_f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_i16_to_f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_i16_to_f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -296,31 +303,35 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, s16
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_f16_to_i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -329,15 +340,17 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_add_f16_e32 v0, s16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -346,16 +359,17 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB3_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-TRUE16-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f16_e64 v0.l, 0x200, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB3_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB3_2
; GFX11-TRUE16-NEXT: .LBB3_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -364,16 +378,17 @@ define inreg i16 @bitcast_f16_to_i16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB3_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-FAKE16-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f16_e64 v0, 0x200, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB3_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB3_2
; GFX11-FAKE16-NEXT: .LBB3_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -494,6 +509,7 @@ define inreg bfloat @bitcast_i16_to_bf16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_and_b32 s6, s16, 0xffff
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s7, s6, 16
@@ -506,53 +522,59 @@ define inreg bfloat @bitcast_i16_to_bf16_scalar(i16 inreg %a, i32 inreg %b) {
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB5_4:
; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_i16_to_bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_i16_to_bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_i16_to_bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -722,6 +744,7 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
@@ -734,16 +757,22 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_bf16_to_i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_4
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v0
@@ -755,8 +784,6 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; VI-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_3:
-; VI-NEXT: s_branch .LBB7_2
; VI-NEXT: .LBB7_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -765,10 +792,14 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v0
@@ -780,8 +811,6 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -790,12 +819,15 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
@@ -809,8 +841,6 @@ define inreg i16 @bitcast_bf16_to_i16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -944,6 +974,7 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v1, s16
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v1
@@ -957,21 +988,25 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB9_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_f16_to_bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_4
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_3:
-; VI-NEXT: s_branch .LBB9_2
; VI-NEXT: .LBB9_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -980,15 +1015,17 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_4
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_add_f16_e32 v0, s16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_3:
-; GFX9-NEXT: s_branch .LBB9_2
; GFX9-NEXT: .LBB9_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -997,16 +1034,17 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s1, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s1, 0
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB9_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB9_4
-; GFX11-TRUE16-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_add_f16_e64 v0.l, 0x200, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-TRUE16-NEXT: .LBB9_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB9_2
; GFX11-TRUE16-NEXT: .LBB9_4:
; GFX11-TRUE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
@@ -1015,16 +1053,17 @@ define inreg bfloat @bitcast_f16_to_bf16_scalar(half inreg %a, i32 inreg %b) {
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s1, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s1, 0
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB9_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB9_4
-; GFX11-FAKE16-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_add_f16_e64 v0, 0x200, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
-; GFX11-FAKE16-NEXT: .LBB9_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB9_2
; GFX11-FAKE16-NEXT: .LBB9_4:
; GFX11-FAKE16-NEXT: v_mov_b32_e32 v0, s0
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
@@ -1196,6 +1235,7 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
@@ -1210,16 +1250,22 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_bf16_to_f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v0
@@ -1231,8 +1277,6 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; VI-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -1241,10 +1285,14 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v0
@@ -1256,8 +1304,6 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX9-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1266,12 +1312,15 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
@@ -1285,8 +1334,6 @@ define inreg half @bitcast_bf16_to_f16_scalar(bfloat inreg %a, i32 inreg %b) {
; GFX11-NEXT: v_cndmask_b32_e32 v0, v1, v2, vcc_lo
; GFX11-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
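The GFX11 checks in this file apply the same flow-mask rewrite in wave32 form: a 32-bit mask register, the s_and_not1_b32 spelling of andn2, and an explicit s_delay_alu hint because the mask write feeds the mask test one SALU instruction later. A minimal wave32 sketch, again with illustrative label and mask-register names:

    s_mov_b32       s1, -1                 ; wave32 flow mask
    s_cbranch_scc0  .LBB_FLOW
    ; %cmp.false
    s_mov_b32       s1, 0
.LBB_FLOW:                                 ; %Flow
    s_delay_alu     instid0(SALU_CYCLE_1)  ; next SALU op reads the s_mov_b32 result
    s_and_not1_b32  vcc_lo, exec_lo, s1    ; vcc_lo = exec_lo & ~mask
    s_cbranch_vccnz .LBB_END               ; mask == 0 -> skip cmp.true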
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll
index c3ace0a..9570fa5 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.192bit.ll
@@ -101,17 +101,21 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s21, s21, 3
; SI-NEXT: s_add_i32 s20, s20, 3
; SI-NEXT: s_add_i32 s19, s19, 3
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -119,24 +123,26 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v6i32_to_v6f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -144,24 +150,26 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v6i32_to_v6f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -169,33 +177,32 @@ define inreg <6 x float> @bitcast_v6i32_to_v6f32_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v6i32_to_v6f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -305,10 +312,14 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -316,8 +327,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -331,10 +340,14 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -342,8 +355,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -357,10 +368,14 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -368,8 +383,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -385,12 +398,15 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -398,8 +414,6 @@ define inreg <6 x i32> @bitcast_v6f32_to_v6i32_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -517,17 +531,21 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s21, s21, 3
; SI-NEXT: s_add_i32 s20, s20, 3
; SI-NEXT: s_add_i32 s19, s19, 3
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -535,24 +553,26 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v6i32_to_v3i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -560,24 +580,26 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v6i32_to_v3i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -585,33 +607,32 @@ define inreg <3 x i64> @bitcast_v6i32_to_v3i64_scalar(<6 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v6i32_to_v3i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -726,17 +747,21 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s20, s20, 3
; SI-NEXT: s_addc_u32 s21, s21, 0
; SI-NEXT: s_add_u32 s18, s18, 3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -744,24 +769,26 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v3i64_to_v6i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s20, s20, 3
; VI-NEXT: s_addc_u32 s21, s21, 0
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -769,24 +796,26 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v3i64_to_v6i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s20, s20, 3
; GFX9-NEXT: s_addc_u32 s21, s21, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -794,33 +823,32 @@ define inreg <6 x i32> @bitcast_v3i64_to_v6i32_scalar(<3 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v3i64_to_v6i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s16, s16, 3
; GFX11-NEXT: s_addc_u32 s17, s17, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB7_3: ; %end
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -933,17 +961,21 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s21, s21, 3
; SI-NEXT: s_add_i32 s20, s20, 3
; SI-NEXT: s_add_i32 s19, s19, 3
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -951,24 +983,26 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v6i32_to_v3f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -976,24 +1010,26 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v6i32_to_v3f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1001,33 +1037,32 @@ define inreg <3 x double> @bitcast_v6i32_to_v3f64_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v6i32_to_v3f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1131,16 +1166,18 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -1154,16 +1191,18 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1177,16 +1216,18 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1202,18 +1243,19 @@ define inreg <6 x i32> @bitcast_v3f64_to_v6i32_scalar(<3 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1358,6 +1400,7 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s20
@@ -1404,23 +1447,29 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v6i32_to_v12i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1428,24 +1477,26 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v6i32_to_v12i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1453,33 +1504,32 @@ define inreg <12 x i16> @bitcast_v6i32_to_v12i16_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v6i32_to_v12i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1667,6 +1717,7 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -1729,16 +1780,22 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v12i16_to_v6i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s21, 3
; VI-NEXT: s_and_b32 s4, s21, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -1769,7 +1826,7 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1777,17 +1834,19 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v12i16_to_v6i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -1795,8 +1854,6 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1812,12 +1869,15 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -1825,8 +1885,6 @@ define inreg <6 x i32> @bitcast_v12i16_to_v6i32_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2012,6 +2070,7 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s21, 16
@@ -2073,23 +2132,29 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v6i32_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
; VI-NEXT: s_add_i32 s19, s19, 3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2097,24 +2162,26 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v6i32_to_v12f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
; GFX9-NEXT: s_add_i32 s19, s19, 3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2122,33 +2189,32 @@ define inreg <12 x half> @bitcast_v6i32_to_v12f16_scalar(<6 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v6i32_to_v12f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
; GFX11-NEXT: s_add_i32 s3, s3, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2375,6 +2441,7 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v7, s27
; SI-NEXT: v_cvt_f16_f32_e32 v6, s26
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v17
@@ -2443,16 +2510,22 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v12f16_to_v6i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2485,8 +2558,6 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2500,10 +2571,14 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -2512,8 +2587,6 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2529,12 +2602,15 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -2542,8 +2618,6 @@ define inreg <6 x i32> @bitcast_v12f16_to_v6i32_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2658,10 +2732,14 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -2669,8 +2747,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -2684,10 +2760,14 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -2695,8 +2775,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2710,10 +2788,14 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -2721,8 +2803,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2738,12 +2818,15 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -2751,8 +2834,6 @@ define inreg <3 x i64> @bitcast_v6f32_to_v3i64_scalar(<6 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2872,17 +2953,21 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s20, s20, 3
; SI-NEXT: s_addc_u32 s21, s21, 0
; SI-NEXT: s_add_u32 s18, s18, 3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -2890,24 +2975,26 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v3i64_to_v6f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s20, s20, 3
; VI-NEXT: s_addc_u32 s21, s21, 0
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2915,24 +3002,26 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v3i64_to_v6f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s20, s20, 3
; GFX9-NEXT: s_addc_u32 s21, s21, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2940,33 +3029,32 @@ define inreg <6 x float> @bitcast_v3i64_to_v6f32_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v3i64_to_v6f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s16, s16, 3
; GFX11-NEXT: s_addc_u32 s17, s17, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB23_3: ; %end
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3076,10 +3164,14 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
; SI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -3087,8 +3179,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -3102,10 +3192,14 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -3113,8 +3207,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3128,10 +3220,14 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -3139,8 +3235,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3156,12 +3250,15 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
-; GFX11-NEXT: .LBB25_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -3169,8 +3266,6 @@ define inreg <3 x double> @bitcast_v6f32_to_v3f64_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3279,16 +3374,18 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -3302,16 +3399,18 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3325,16 +3424,18 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3350,18 +3451,19 @@ define inreg <6 x float> @bitcast_v3f64_to_v6f32_scalar(<3 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3503,6 +3605,7 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB29_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s20
@@ -3536,7 +3639,8 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -3553,10 +3657,14 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -3564,8 +3672,6 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3581,10 +3687,14 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -3592,8 +3702,6 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3611,12 +3719,15 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -3624,8 +3735,6 @@ define inreg <12 x i16> @bitcast_v6f32_to_v12i16_scalar(<6 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3819,6 +3928,7 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: s_cbranch_scc0 .LBB31_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -3881,16 +3991,22 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v12i16_to_v6f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s21, 3
; VI-NEXT: s_and_b32 s4, s21, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -3921,7 +4037,7 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3929,17 +4045,19 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v12i16_to_v6f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -3947,8 +4065,6 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3964,12 +4080,15 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -3977,8 +4096,6 @@ define inreg <6 x float> @bitcast_v12i16_to_v6f32_scalar(<12 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -4161,6 +4278,7 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s21, 16
@@ -4222,16 +4340,22 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v6f32_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
; VI-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -4239,8 +4363,6 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4256,10 +4378,14 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
; GFX9-NEXT: v_add_f32_e64 v3, s19, 1.0
@@ -4267,8 +4393,6 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4286,12 +4410,15 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
; GFX11-NEXT: v_add_f32_e64 v3, s3, 1.0
@@ -4299,8 +4426,6 @@ define inreg <12 x half> @bitcast_v6f32_to_v12f16_scalar(<6 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -4533,6 +4658,7 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v7, s27
; SI-NEXT: v_cvt_f16_f32_e32 v6, s26
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v17
@@ -4601,16 +4727,22 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v12f16_to_v6f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -4643,8 +4775,6 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4658,10 +4788,14 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -4670,8 +4804,6 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4687,12 +4819,15 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -4700,8 +4835,6 @@ define inreg <6 x float> @bitcast_v12f16_to_v6f32_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -4821,17 +4954,21 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
; SI-NEXT: s_add_u32 s18, s18, 3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s20, s20, 3
; SI-NEXT: s_addc_u32 s21, s21, 0
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4839,24 +4976,26 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v3i64_to_v3f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s20, s20, 3
; VI-NEXT: s_addc_u32 s21, s21, 0
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4864,24 +5003,26 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v3i64_to_v3f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s20, s20, 3
; GFX9-NEXT: s_addc_u32 s21, s21, 0
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4889,32 +5030,31 @@ define inreg <3 x double> @bitcast_v3i64_to_v3f64_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v3i64_to_v3f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s16, s16, 3
; GFX11-NEXT: s_addc_u32 s17, s17, 0
-; GFX11-NEXT: .LBB37_3: ; %end
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5018,16 +5158,18 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -5041,16 +5183,18 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5064,16 +5208,18 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5089,18 +5235,19 @@ define inreg <3 x i64> @bitcast_v3f64_to_v3i64_scalar(<3 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5247,6 +5394,7 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s20
@@ -5293,23 +5441,29 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v3i64_to_v12i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_3
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s20, s20, 3
; VI-NEXT: s_addc_u32 s21, s21, 0
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB41_3: ; %end
+; VI-NEXT: .LBB41_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5317,24 +5471,26 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v3i64_to_v12i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_3
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s20, s20, 3
; GFX9-NEXT: s_addc_u32 s21, s21, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB41_3: ; %end
+; GFX9-NEXT: .LBB41_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -5342,33 +5498,32 @@ define inreg <12 x i16> @bitcast_v3i64_to_v12i16_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v3i64_to_v12i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s16, s16, 3
; GFX11-NEXT: s_addc_u32 s17, s17, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB41_3: ; %end
+; GFX11-NEXT: .LBB41_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5556,6 +5711,7 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -5618,16 +5774,22 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v12i16_to_v3i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_3
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s21, 3
; VI-NEXT: s_and_b32 s4, s21, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -5658,7 +5820,7 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB43_3: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5666,17 +5828,19 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v12i16_to_v3i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -5684,8 +5848,6 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5701,12 +5863,15 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -5714,8 +5879,6 @@ define inreg <3 x i64> @bitcast_v12i16_to_v3i64_scalar(<12 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5903,6 +6066,7 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s21, 16
@@ -5964,23 +6128,29 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v3i64_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_3
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s20, s20, 3
; VI-NEXT: s_addc_u32 s21, s21, 0
; VI-NEXT: s_add_u32 s18, s18, 3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB45_3: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5988,24 +6158,26 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v3i64_to_v12f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_3
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s20, s20, 3
; GFX9-NEXT: s_addc_u32 s21, s21, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB45_3: ; %end
+; GFX9-NEXT: .LBB45_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6013,33 +6185,32 @@ define inreg <12 x half> @bitcast_v3i64_to_v12f16_scalar(<3 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v3i64_to_v12f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s16, s16, 3
; GFX11-NEXT: s_addc_u32 s17, s17, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB45_3: ; %end
+; GFX11-NEXT: .LBB45_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6266,6 +6437,7 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v7, s27
; SI-NEXT: v_cvt_f16_f32_e32 v6, s26
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v17
@@ -6334,16 +6506,22 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v12f16_to_v3i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -6376,8 +6554,6 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6391,10 +6567,14 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -6403,8 +6583,6 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6420,12 +6598,15 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -6433,8 +6614,6 @@ define inreg <3 x i64> @bitcast_v12f16_to_v3i64_scalar(<12 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6576,6 +6755,7 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s20
@@ -6606,7 +6786,8 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v17, s17
; SI-NEXT: v_mov_b32_e32 v16, s16
@@ -6630,16 +6811,18 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_4
-; VI-NEXT: .LBB49_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB49_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB49_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_3:
-; VI-NEXT: s_branch .LBB49_2
; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6655,16 +6838,18 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
-; GFX9-NEXT: .LBB49_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB49_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6682,18 +6867,19 @@ define inreg <12 x i16> @bitcast_v3f64_to_v12i16_scalar(<3 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB49_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6887,6 +7073,7 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: s_cbranch_scc0 .LBB51_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -6949,16 +7136,22 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v12i16_to_v3f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_3
-; VI-NEXT: .LBB51_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB51_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB51_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s21, 3
; VI-NEXT: s_and_b32 s4, s21, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -6989,7 +7182,7 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB51_3: ; %end
+; VI-NEXT: .LBB51_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6997,17 +7190,19 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v12i16_to_v3f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
-; GFX9-NEXT: .LBB51_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB51_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -7015,8 +7210,6 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7032,12 +7225,15 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB51_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -7045,8 +7241,6 @@ define inreg <3 x double> @bitcast_v12i16_to_v3f64_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7216,6 +7410,7 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB53_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s21, 16
@@ -7274,22 +7469,26 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v3f64_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_4
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_3:
-; VI-NEXT: s_branch .LBB53_2
; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7305,16 +7504,18 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7332,18 +7533,19 @@ define inreg <12 x half> @bitcast_v3f64_to_v12f16_scalar(<3 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7576,6 +7778,7 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v7, s27
; SI-NEXT: v_cvt_f16_f32_e32 v6, s26
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v17
@@ -7644,16 +7847,22 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v12f16_to_v3f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s21, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -7686,8 +7895,6 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7701,10 +7908,14 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -7713,8 +7924,6 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7730,12 +7939,15 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s6, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -7743,8 +7955,6 @@ define inreg <3 x double> @bitcast_v12f16_to_v3f64_scalar(<12 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7950,6 +8160,7 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -8005,16 +8216,22 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v12i16_to_v12f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_add_i32 s9, s18, 3
@@ -8045,7 +8262,7 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8053,17 +8270,19 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v12i16_to_v12f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v3, s19, 3 op_sel_hi:[1,0]
@@ -8071,8 +8290,6 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8090,12 +8307,15 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
@@ -8103,8 +8323,6 @@ define inreg <12 x half> @bitcast_v12i16_to_v12f16_scalar(<12 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -8308,10 +8526,14 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v10, s26
; SI-NEXT: v_cvt_f16_f32_e32 v11, s27
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
@@ -8363,19 +8585,21 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v12f16_to_v12i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s22, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -8408,8 +8632,6 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v1, v8
; VI-NEXT: v_or_b32_e32 v0, v6, v7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8425,10 +8647,14 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s22, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v4, s20, v0 op_sel_hi:[1,0]
@@ -8437,8 +8663,6 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8456,12 +8680,15 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s18, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
@@ -8469,8 +8696,6 @@ define inreg <12 x i16> @bitcast_v12f16_to_v12i16_scalar(<12 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll
index c830d6b..a768d75 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.224bit.ll
@@ -106,10 +106,14 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s22, s22, 3
; SI-NEXT: s_add_i32 s21, s21, 3
; SI-NEXT: s_add_i32 s20, s20, 3
@@ -117,7 +121,7 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -126,17 +130,19 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v5, s21
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v7i32_to_v7f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
@@ -144,7 +150,7 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -153,17 +159,19 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v7i32_to_v7f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
@@ -171,7 +179,7 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -180,19 +188,20 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v7i32_to_v7f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
@@ -200,15 +209,13 @@ define inreg <7 x float> @bitcast_v7i32_to_v7f32_scalar(<7 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_mov_b32_e32 v6, s18
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -322,10 +329,14 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
; SI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -334,8 +345,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -350,10 +359,14 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -362,8 +375,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -378,10 +389,14 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -390,8 +405,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -409,12 +422,15 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -423,8 +439,6 @@ define inreg <7 x i32> @bitcast_v7f32_to_v7i32_scalar(<7 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -583,6 +597,7 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -636,16 +651,22 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_v7i32_to_v14i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
@@ -653,7 +674,7 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -662,17 +683,19 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v7i32_to_v14i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
@@ -680,7 +703,7 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -689,19 +712,20 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v7i32_to_v14i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
@@ -709,15 +733,13 @@ define inreg <14 x i16> @bitcast_v7i32_to_v14i16_scalar(<7 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_mov_b32_e32 v6, s18
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -922,6 +944,7 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[12:13], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -993,16 +1016,22 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v14i16_to_v7i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s22, 3
; VI-NEXT: s_and_b32 s4, s22, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -1038,7 +1067,7 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1047,17 +1076,19 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v14i16_to_v7i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -1066,8 +1097,6 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1085,12 +1114,15 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -1099,8 +1131,6 @@ define inreg <7 x i32> @bitcast_v14i16_to_v7i32_scalar(<14 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1302,6 +1332,7 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s22, 16
@@ -1372,16 +1403,22 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: ; implicit-def: $vgpr12
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_v7i32_to_v14f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
; VI-NEXT: s_add_i32 s20, s20, 3
@@ -1389,7 +1426,7 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1398,17 +1435,19 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v7i32_to_v14f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
; GFX9-NEXT: s_add_i32 s20, s20, 3
@@ -1416,7 +1455,7 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1425,19 +1464,20 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v5, s21
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v7i32_to_v14f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
; GFX11-NEXT: s_add_i32 s16, s16, 3
@@ -1445,15 +1485,13 @@ define inreg <14 x half> @bitcast_v7i32_to_v14f16_scalar(<7 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_mov_b32_e32 v6, s18
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1703,6 +1741,7 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v20
@@ -1781,16 +1820,22 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v14f16_to_v7i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s22, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -1828,8 +1873,6 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1844,10 +1887,14 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -1857,8 +1904,6 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1876,12 +1921,15 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -1890,8 +1938,6 @@ define inreg <7 x i32> @bitcast_v14f16_to_v7i32_scalar(<14 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2046,6 +2092,7 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -2084,7 +2131,8 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -2102,10 +2150,14 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2114,8 +2166,6 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2131,10 +2181,14 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2143,8 +2197,6 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2163,12 +2215,15 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -2177,8 +2232,6 @@ define inreg <14 x i16> @bitcast_v7f32_to_v14i16_scalar(<7 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2389,6 +2442,7 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[12:13], -1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -2460,16 +2514,22 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7_sgpr8_sgpr9_sgpr10
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v14i16_to_v7f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s22, 3
; VI-NEXT: s_and_b32 s4, s22, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -2505,7 +2565,7 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2514,17 +2574,19 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v14i16_to_v7f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -2533,8 +2595,6 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2552,12 +2612,15 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -2566,8 +2629,6 @@ define inreg <7 x float> @bitcast_v14i16_to_v7f32_scalar(<14 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2765,6 +2826,7 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s23, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s22, 16
@@ -2835,16 +2897,22 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: ; implicit-def: $vgpr12
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v7f32_to_v14f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
; VI-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2853,8 +2921,6 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2870,10 +2936,14 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
; GFX9-NEXT: v_add_f32_e64 v4, s20, 1.0
@@ -2882,8 +2952,6 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2902,12 +2970,15 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
; GFX11-NEXT: v_add_f32_e64 v4, s4, 1.0
@@ -2916,8 +2987,6 @@ define inreg <14 x half> @bitcast_v7f32_to_v14f16_scalar(<7 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3173,6 +3242,7 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v20
@@ -3251,16 +3321,22 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v14f16_to_v7f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s22, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -3298,8 +3374,6 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3314,10 +3388,14 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -3327,8 +3405,6 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3346,12 +3422,15 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s7, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -3360,8 +3439,6 @@ define inreg <7 x float> @bitcast_v14f16_to_v7f32_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3587,6 +3664,7 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -3650,16 +3728,22 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: ; implicit-def: $vgpr12
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v14i16_to_v14f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
; VI-NEXT: s_add_i32 s7, s17, 3
@@ -3695,7 +3779,7 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3704,17 +3788,19 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v5, s21
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v14i16_to_v14f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v4, s20, 3 op_sel_hi:[1,0]
@@ -3723,8 +3809,6 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3743,12 +3827,15 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
@@ -3757,8 +3844,6 @@ define inreg <14 x half> @bitcast_v14i16_to_v14f16_scalar(<14 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3982,10 +4067,14 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v13, s29
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
@@ -4045,19 +4134,21 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v14f16_to_v14i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s23, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -4095,8 +4186,6 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v1, v9
; VI-NEXT: v_or_b32_e32 v0, v7, v8
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4112,10 +4201,14 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s23, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v5, s21, v0 op_sel_hi:[1,0]
@@ -4125,8 +4218,6 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4145,12 +4236,15 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s19, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
@@ -4159,8 +4253,6 @@ define inreg <14 x i16> @bitcast_v14f16_to_v14i16_scalar(<14 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
index f8ffaa4..4b7e4c8 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.256bit.ll
@@ -110,10 +110,14 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s23, s23, 3
; SI-NEXT: s_add_i32 s22, s22, 3
; SI-NEXT: s_add_i32 s21, s21, 3
@@ -122,7 +126,7 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -132,17 +136,19 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v8i32_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
@@ -151,7 +157,7 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -161,17 +167,19 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v8i32_to_v8f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
@@ -180,7 +188,7 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -190,19 +198,20 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v8i32_to_v8f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
@@ -211,15 +220,13 @@ define inreg <8 x float> @bitcast_v8i32_to_v8f32_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -336,10 +343,14 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -349,8 +360,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -366,10 +375,14 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -379,8 +392,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -396,10 +407,14 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -409,8 +424,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -430,12 +443,15 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -445,8 +461,6 @@ define inreg <8 x i32> @bitcast_v8f32_to_v8i32_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -574,10 +588,14 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s23, s23, 3
; SI-NEXT: s_add_i32 s22, s22, 3
; SI-NEXT: s_add_i32 s21, s21, 3
@@ -586,7 +604,7 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -596,17 +614,19 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v8i32_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
@@ -615,7 +635,7 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -625,17 +645,19 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v8i32_to_v4i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
@@ -644,7 +666,7 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -654,19 +676,20 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v8i32_to_v4i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
@@ -675,15 +698,13 @@ define inreg <4 x i64> @bitcast_v8i32_to_v4i64_scalar(<8 x i32> inreg %a, i32 in
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -807,10 +828,14 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s22, s22, 3
; SI-NEXT: s_addc_u32 s23, s23, 0
; SI-NEXT: s_add_u32 s20, s20, 3
@@ -819,7 +844,7 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -829,17 +854,19 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v4i64_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s22, s22, 3
; VI-NEXT: s_addc_u32 s23, s23, 0
; VI-NEXT: s_add_u32 s20, s20, 3
@@ -848,7 +875,7 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -858,17 +885,19 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v4i64_to_v8i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s22, s22, 3
; GFX9-NEXT: s_addc_u32 s23, s23, 0
; GFX9-NEXT: s_add_u32 s20, s20, 3
@@ -877,7 +906,7 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -887,19 +916,20 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v4i64_to_v8i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s18, s18, 3
; GFX11-NEXT: s_addc_u32 s19, s19, 0
; GFX11-NEXT: s_add_u32 s16, s16, 3
@@ -908,15 +938,13 @@ define inreg <8 x i32> @bitcast_v4i64_to_v8i32_scalar(<4 x i64> inreg %a, i32 in
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB7_3: ; %end
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1038,10 +1066,14 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s23, s23, 3
; SI-NEXT: s_add_i32 s22, s22, 3
; SI-NEXT: s_add_i32 s21, s21, 3
@@ -1050,7 +1082,7 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1060,17 +1092,19 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v8i32_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
@@ -1079,7 +1113,7 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1089,17 +1123,19 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v8i32_to_v4f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
@@ -1108,7 +1144,7 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1118,19 +1154,20 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v8i32_to_v4f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
@@ -1139,15 +1176,13 @@ define inreg <4 x double> @bitcast_v8i32_to_v4f64_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1255,17 +1290,19 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -1281,17 +1318,19 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1307,17 +1346,19 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1337,19 +1378,20 @@ define inreg <8 x i32> @bitcast_v4f64_to_v8i32_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1512,6 +1554,7 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -1571,16 +1614,22 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v8i32_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
@@ -1589,7 +1638,7 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1599,17 +1648,19 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v8i32_to_v16i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
@@ -1618,7 +1669,7 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1628,19 +1679,20 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v8i32_to_v16i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
@@ -1649,15 +1701,13 @@ define inreg <16 x i16> @bitcast_v8i32_to_v16i16_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1875,8 +1925,9 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v8, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -1962,16 +2013,22 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v16i16_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s23, 3
; VI-NEXT: s_and_b32 s4, s23, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -2012,7 +2069,7 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2022,17 +2079,19 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v16i16_to_v8i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -2042,8 +2101,6 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2063,12 +2120,15 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -2078,8 +2138,6 @@ define inreg <8 x i32> @bitcast_v16i16_to_v8i32_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2295,6 +2353,7 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s23, 16
@@ -2374,16 +2433,22 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v8i32_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
@@ -2392,7 +2457,7 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2402,17 +2467,19 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v8i32_to_v16f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
@@ -2421,7 +2488,7 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2431,19 +2498,20 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v8i32_to_v16f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
@@ -2452,15 +2520,13 @@ define inreg <16 x half> @bitcast_v8i32_to_v16f16_scalar(<8 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2731,6 +2797,7 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23
@@ -2819,16 +2886,22 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v16f16_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2871,8 +2944,6 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2888,10 +2959,14 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -2902,8 +2977,6 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2923,12 +2996,15 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -2938,8 +3014,6 @@ define inreg <8 x i32> @bitcast_v16f16_to_v8i32_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3139,6 +3213,7 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s23, 0xffff0000
@@ -3218,16 +3293,22 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v8i32_to_v16bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
; VI-NEXT: s_add_i32 s21, s21, 3
@@ -3236,7 +3317,7 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3246,17 +3327,19 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v8i32_to_v16bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
; GFX9-NEXT: s_add_i32 s21, s21, 3
@@ -3265,7 +3348,7 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3275,19 +3358,20 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v8i32_to_v16bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
; GFX11-NEXT: s_add_i32 s17, s17, 3
@@ -3296,15 +3380,13 @@ define inreg <16 x bfloat> @bitcast_v8i32_to_v16bf16_scalar(<8 x i32> inreg %a,
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB21_3: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4070,6 +4152,7 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s28
; SI-NEXT: v_mul_f32_e32 v8, 1.0, v1
; SI-NEXT: v_mul_f32_e32 v9, 1.0, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB23_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v22
@@ -4142,16 +4225,22 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB23_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB23_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB23_2
+; SI-NEXT: s_branch .LBB23_3
;
; VI-LABEL: bitcast_v16bf16_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -4298,8 +4387,6 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v8, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4315,10 +4402,14 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -4474,8 +4565,6 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_and_b32_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4495,12 +4584,15 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
@@ -4676,8 +4768,6 @@ define inreg <8 x i32> @bitcast_v16bf16_to_v8i32_scalar(<16 x bfloat> inreg %a,
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5251,6 +5341,7 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -5366,12 +5457,15 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
+; SI-NEXT: s_branch .LBB25_3
;
; VI-LABEL: bitcast_v8i32_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[12:13], -1
; VI-NEXT: s_cbranch_scc0 .LBB25_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
@@ -5491,12 +5585,15 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr24
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB25_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; VI-NEXT: s_cbranch_vccz .LBB25_2
+; VI-NEXT: s_branch .LBB25_3
;
; GFX9-LABEL: bitcast_v8i32_to_v32i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[12:13], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
@@ -5616,15 +5713,18 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr24
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB25_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GFX9-NEXT: s_cbranch_vccz .LBB25_2
+; GFX9-NEXT: s_branch .LBB25_3
;
; GFX11-LABEL: bitcast_v8i32_to_v32i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s46, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[18:19], 24
; GFX11-NEXT: s_lshr_b32 s12, s19, 24
; GFX11-NEXT: s_lshr_b32 s13, s19, 16
; GFX11-NEXT: s_lshr_b32 s14, s19, 8
@@ -5645,12 +5745,10 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s43, s1, 8
; GFX11-NEXT: s_lshr_b32 s44, s0, 16
; GFX11-NEXT: s_lshr_b32 s45, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
+; GFX11-NEXT: s_cbranch_execnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
@@ -5728,7 +5826,9 @@ define inreg <32 x i8> @bitcast_v8i32_to_v32i8_scalar(<8 x i32> inreg %a, i32 in
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB25_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB25_2
+; GFX11-NEXT: s_branch .LBB25_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6766,11 +6866,12 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v22, v6
; SI-NEXT: v_mov_b32_e32 v21, v4
; SI-NEXT: v_mov_b32_e32 v20, v2
; SI-NEXT: v_mov_b32_e32 v19, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v5
@@ -6952,17 +7053,20 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB27_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB27_2
+; SI-NEXT: s_branch .LBB27_3
;
; VI-LABEL: bitcast_v32i8_to_v8i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, v6
; VI-NEXT: v_mov_b32_e32 v21, v4
; VI-NEXT: v_mov_b32_e32 v20, v2
; VI-NEXT: v_mov_b32_e32 v19, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -7108,17 +7212,20 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB27_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB27_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB27_2
+; VI-NEXT: s_branch .LBB27_3
;
; GFX9-LABEL: bitcast_v32i8_to_v8i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, v6
; GFX9-NEXT: v_mov_b32_e32 v21, v4
; GFX9-NEXT: v_mov_b32_e32 v20, v2
; GFX9-NEXT: v_mov_b32_e32 v19, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -7265,7 +7372,9 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB27_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB27_2
+; GFX9-NEXT: s_branch .LBB27_3
;
; GFX11-LABEL: bitcast_v32i8_to_v8i32_scalar:
; GFX11: ; %bb.0:
@@ -7280,44 +7389,44 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
-; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
-; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s23, 8
; GFX11-NEXT: v_or_b32_e32 v1, v1, v21
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
; GFX11-NEXT: v_or_b32_e32 v2, v2, v14
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v18
@@ -7325,35 +7434,34 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v12
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
+; GFX11-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
; GFX11-NEXT: v_or_b32_e32 v3, v3, v19
; GFX11-NEXT: v_or_b32_e32 v4, v4, v9
; GFX11-NEXT: v_or_b32_e32 v6, v6, v13
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v5
; GFX11-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: v_mov_b32_e32 v1, s5
; GFX11-NEXT: v_or_b32_e32 v0, v0, v20
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v4
; GFX11-NEXT: v_lshlrev_b32_e32 v23, 16, v6
-; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_or_b32_e32 v6, v3, v7
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_or_b32_e32 v7, v22, v23
-; GFX11-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
+; GFX11-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -7460,7 +7568,9 @@ define inreg <8 x i32> @bitcast_v32i8_to_v8i32_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB27_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB27_2
+; GFX11-NEXT: s_branch .LBB27_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7577,10 +7687,14 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
-; SI-NEXT: .LBB29_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB29_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB29_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -7590,8 +7704,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -7607,10 +7719,14 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -7620,8 +7736,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7637,10 +7751,14 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -7650,8 +7768,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7671,12 +7787,15 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -7686,8 +7805,6 @@ define inreg <4 x i64> @bitcast_v8f32_to_v4i64_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7817,10 +7934,14 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB31_3
-; SI-NEXT: .LBB31_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB31_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB31_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s22, s22, 3
; SI-NEXT: s_addc_u32 s23, s23, 0
; SI-NEXT: s_add_u32 s20, s20, 3
@@ -7829,7 +7950,7 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB31_3: ; %end
+; SI-NEXT: .LBB31_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7839,17 +7960,19 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v4i64_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s22, s22, 3
; VI-NEXT: s_addc_u32 s23, s23, 0
; VI-NEXT: s_add_u32 s20, s20, 3
@@ -7858,7 +7981,7 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7868,17 +7991,19 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v4i64_to_v8f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_3
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s22, s22, 3
; GFX9-NEXT: s_addc_u32 s23, s23, 0
; GFX9-NEXT: s_add_u32 s20, s20, 3
@@ -7887,7 +8012,7 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB31_3: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7897,19 +8022,20 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v4i64_to_v8f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s18, s18, 3
; GFX11-NEXT: s_addc_u32 s19, s19, 0
; GFX11-NEXT: s_add_u32 s16, s16, 3
@@ -7918,15 +8044,13 @@ define inreg <8 x float> @bitcast_v4i64_to_v8f32_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB31_3: ; %end
+; GFX11-NEXT: .LBB31_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8043,10 +8167,14 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
-; SI-NEXT: .LBB33_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB33_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB33_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
; SI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -8056,8 +8184,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -8073,10 +8199,14 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -8086,8 +8216,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8103,10 +8231,14 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -8116,8 +8248,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8137,12 +8267,15 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -8152,8 +8285,6 @@ define inreg <4 x double> @bitcast_v8f32_to_v4f64_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -8267,17 +8398,19 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
-; SI-NEXT: .LBB35_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB35_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB35_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -8293,17 +8426,19 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8319,17 +8454,19 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8349,19 +8486,20 @@ define inreg <8 x float> @bitcast_v4f64_to_v8f32_scalar(<4 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -8519,6 +8657,7 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB37_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -8561,7 +8700,8 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB37_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB37_2
; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -8581,10 +8721,14 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -8594,8 +8738,6 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8611,10 +8753,14 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -8624,8 +8770,6 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8645,12 +8789,15 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -8660,8 +8807,6 @@ define inreg <16 x i16> @bitcast_v8f32_to_v16i16_scalar(<8 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -8885,8 +9030,9 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v8, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; SI-NEXT: s_cbranch_scc0 .LBB39_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -8972,16 +9118,22 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB39_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB39_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB39_2
+; SI-NEXT: s_branch .LBB39_3
;
; VI-LABEL: bitcast_v16i16_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s23, 3
; VI-NEXT: s_and_b32 s4, s23, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -9022,7 +9174,7 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -9032,17 +9184,19 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v16i16_to_v8f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -9052,8 +9206,6 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9073,12 +9225,15 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -9088,8 +9243,6 @@ define inreg <8 x float> @bitcast_v16i16_to_v8f32_scalar(<16 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -9300,6 +9453,7 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s23, 16
@@ -9379,16 +9533,22 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v8f32_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -9398,8 +9558,6 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -9415,10 +9573,14 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -9428,8 +9590,6 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9449,12 +9609,15 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -9464,8 +9627,6 @@ define inreg <16 x half> @bitcast_v8f32_to_v16f16_scalar(<8 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -9742,6 +9903,7 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23
@@ -9830,16 +9992,22 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v16f16_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -9882,8 +10050,6 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -9899,10 +10065,14 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -9913,8 +10083,6 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9934,12 +10102,15 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -9949,8 +10120,6 @@ define inreg <8 x float> @bitcast_v16f16_to_v8f32_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -10145,6 +10314,7 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s23, 0xffff0000
@@ -10207,7 +10377,8 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v0, s29
; SI-NEXT: v_mov_b32_e32 v1, s28
@@ -10231,10 +10402,14 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
; VI-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -10244,8 +10419,6 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10261,10 +10434,14 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
; GFX9-NEXT: v_add_f32_e64 v5, s21, 1.0
@@ -10274,8 +10451,6 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10295,12 +10470,15 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v7, s7, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s6, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s5, 1.0
@@ -10310,8 +10488,6 @@ define inreg <16 x bfloat> @bitcast_v8f32_to_v16bf16_scalar(<8 x float> inreg %a
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -11083,6 +11259,7 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s28
; SI-NEXT: v_mul_f32_e32 v8, 1.0, v1
; SI-NEXT: v_mul_f32_e32 v9, 1.0, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v22
@@ -11155,16 +11332,22 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v16bf16_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -11311,8 +11494,6 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v8, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -11328,10 +11509,14 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -11487,8 +11672,6 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_and_b32_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -11508,12 +11691,15 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
@@ -11689,8 +11875,6 @@ define inreg <8 x float> @bitcast_v16bf16_to_v8f32_scalar(<16 x bfloat> inreg %a
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -12260,6 +12444,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -12350,7 +12535,8 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr25
; SI-NEXT: ; implicit-def: $sgpr24
; SI-NEXT: ; implicit-def: $sgpr15
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v4, s17
@@ -12378,6 +12564,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[12:13], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
@@ -12464,7 +12651,8 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr25
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -12513,6 +12701,7 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[12:13], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
@@ -12599,7 +12788,8 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr25
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12648,35 +12838,34 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s12, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s13, s19, 24
-; GFX11-NEXT: s_lshr_b32 s14, s19, 16
-; GFX11-NEXT: s_lshr_b32 s20, s19, 8
-; GFX11-NEXT: s_lshr_b32 s15, s18, 16
-; GFX11-NEXT: s_lshr_b32 s21, s18, 8
-; GFX11-NEXT: s_lshr_b32 s22, s17, 24
-; GFX11-NEXT: s_lshr_b32 s23, s17, 16
-; GFX11-NEXT: s_lshr_b32 s25, s17, 8
-; GFX11-NEXT: s_lshr_b32 s24, s16, 16
-; GFX11-NEXT: s_lshr_b32 s26, s16, 8
-; GFX11-NEXT: s_lshr_b32 s27, s3, 24
-; GFX11-NEXT: s_lshr_b32 s28, s3, 16
-; GFX11-NEXT: s_lshr_b32 s40, s3, 8
-; GFX11-NEXT: s_lshr_b32 s29, s2, 16
-; GFX11-NEXT: s_lshr_b32 s41, s2, 8
-; GFX11-NEXT: s_lshr_b32 s42, s1, 24
-; GFX11-NEXT: s_lshr_b32 s43, s1, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 8
-; GFX11-NEXT: s_lshr_b32 s44, s0, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s12, s19, 24
+; GFX11-NEXT: s_lshr_b32 s13, s19, 16
+; GFX11-NEXT: s_lshr_b32 s15, s19, 8
+; GFX11-NEXT: s_lshr_b32 s14, s18, 16
+; GFX11-NEXT: s_lshr_b32 s20, s18, 8
+; GFX11-NEXT: s_lshr_b32 s21, s17, 24
+; GFX11-NEXT: s_lshr_b32 s22, s17, 16
+; GFX11-NEXT: s_lshr_b32 s24, s17, 8
+; GFX11-NEXT: s_lshr_b32 s23, s16, 16
+; GFX11-NEXT: s_lshr_b32 s25, s16, 8
+; GFX11-NEXT: s_lshr_b32 s26, s3, 24
+; GFX11-NEXT: s_lshr_b32 s27, s3, 16
+; GFX11-NEXT: s_lshr_b32 s29, s3, 8
+; GFX11-NEXT: s_lshr_b32 s28, s2, 16
+; GFX11-NEXT: s_lshr_b32 s40, s2, 8
+; GFX11-NEXT: s_lshr_b32 s41, s1, 24
+; GFX11-NEXT: s_lshr_b32 s42, s1, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 8
+; GFX11-NEXT: s_lshr_b32 s43, s0, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 8
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_execnz .LBB49_4
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v39, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v37, s3, 1.0
@@ -12712,48 +12901,49 @@ define inreg <32 x i8> @bitcast_v8f32_to_v32i8_scalar(<8 x float> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
; GFX11-NEXT: s_branch .LBB49_5
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr28
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr29
; GFX11-NEXT: ; implicit-def: $sgpr27
; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr25
; GFX11-NEXT: ; implicit-def: $sgpr23
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr24
; GFX11-NEXT: ; implicit-def: $sgpr22
; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr20
; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v39, s1
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v37, s3
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v35, s17
; GFX11-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v33, s19
-; GFX11-NEXT: v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v2, s44
-; GFX11-NEXT: v_dual_mov_b32 v5, s45 :: v_dual_mov_b32 v6, s43
-; GFX11-NEXT: v_dual_mov_b32 v7, s42 :: v_dual_mov_b32 v10, s29
-; GFX11-NEXT: v_dual_mov_b32 v9, s41 :: v_dual_mov_b32 v14, s28
-; GFX11-NEXT: v_dual_mov_b32 v13, s40 :: v_dual_mov_b32 v18, s24
-; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v22, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s26 :: v_dual_mov_b32 v26, s15
-; GFX11-NEXT: v_dual_mov_b32 v21, s25 :: v_dual_mov_b32 v30, s14
-; GFX11-NEXT: v_mov_b32_e32 v23, s22
-; GFX11-NEXT: v_mov_b32_e32 v25, s21
-; GFX11-NEXT: v_mov_b32_e32 v29, s20
-; GFX11-NEXT: v_mov_b32_e32 v31, s13
+; GFX11-NEXT: v_dual_mov_b32 v1, s45 :: v_dual_mov_b32 v2, s43
+; GFX11-NEXT: v_dual_mov_b32 v5, s44 :: v_dual_mov_b32 v6, s42
+; GFX11-NEXT: v_dual_mov_b32 v7, s41 :: v_dual_mov_b32 v10, s28
+; GFX11-NEXT: v_dual_mov_b32 v9, s40 :: v_dual_mov_b32 v14, s27
+; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v18, s23
+; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v22, s22
+; GFX11-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v26, s14
+; GFX11-NEXT: v_dual_mov_b32 v21, s24 :: v_dual_mov_b32 v30, s13
+; GFX11-NEXT: v_mov_b32_e32 v23, s21
+; GFX11-NEXT: v_mov_b32_e32 v25, s20
+; GFX11-NEXT: v_mov_b32_e32 v29, s15
+; GFX11-NEXT: v_mov_b32_e32 v31, s12
; GFX11-NEXT: v_mov_b32_e32 v27, s10
; GFX11-NEXT: v_mov_b32_e32 v19, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
@@ -13805,11 +13995,12 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v22, v6
; SI-NEXT: v_mov_b32_e32 v21, v4
; SI-NEXT: v_mov_b32_e32 v20, v2
; SI-NEXT: v_mov_b32_e32 v19, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v5
@@ -13991,17 +14182,20 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v32i8_to_v8f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, v6
; VI-NEXT: v_mov_b32_e32 v21, v4
; VI-NEXT: v_mov_b32_e32 v20, v2
; VI-NEXT: v_mov_b32_e32 v19, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -14147,17 +14341,20 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v32i8_to_v8f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, v6
; GFX9-NEXT: v_mov_b32_e32 v21, v4
; GFX9-NEXT: v_mov_b32_e32 v20, v2
; GFX9-NEXT: v_mov_b32_e32 v19, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -14304,7 +14501,9 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-LABEL: bitcast_v32i8_to_v8f32_scalar:
; GFX11: ; %bb.0:
@@ -14319,44 +14518,44 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
-; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
-; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s23, 8
; GFX11-NEXT: v_or_b32_e32 v1, v1, v21
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
; GFX11-NEXT: v_or_b32_e32 v2, v2, v14
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v18
@@ -14364,35 +14563,34 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v12
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
+; GFX11-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
; GFX11-NEXT: v_or_b32_e32 v3, v3, v19
; GFX11-NEXT: v_or_b32_e32 v4, v4, v9
; GFX11-NEXT: v_or_b32_e32 v6, v6, v13
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v5
; GFX11-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: v_mov_b32_e32 v1, s5
; GFX11-NEXT: v_or_b32_e32 v0, v0, v20
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v4
; GFX11-NEXT: v_lshlrev_b32_e32 v23, 16, v6
-; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_or_b32_e32 v6, v3, v7
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_or_b32_e32 v7, v22, v23
-; GFX11-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -14499,7 +14697,9 @@ define inreg <8 x float> @bitcast_v32i8_to_v8f32_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB51_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14623,10 +14823,14 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB53_3
-; SI-NEXT: .LBB53_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB53_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB53_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
; SI-NEXT: s_add_u32 s18, s18, 3
@@ -14635,7 +14839,7 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s21, s21, 0
; SI-NEXT: s_add_u32 s22, s22, 3
; SI-NEXT: s_addc_u32 s23, s23, 0
-; SI-NEXT: .LBB53_3: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -14645,17 +14849,19 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v4i64_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
; VI-NEXT: s_add_u32 s18, s18, 3
@@ -14664,7 +14870,7 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s21, s21, 0
; VI-NEXT: s_add_u32 s22, s22, 3
; VI-NEXT: s_addc_u32 s23, s23, 0
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14674,17 +14880,19 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v4i64_to_v4f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_3
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
@@ -14693,7 +14901,7 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s21, s21, 0
; GFX9-NEXT: s_add_u32 s22, s22, 3
; GFX9-NEXT: s_addc_u32 s23, s23, 0
-; GFX9-NEXT: .LBB53_3: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14703,19 +14911,20 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v4i64_to_v4f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
@@ -14724,14 +14933,12 @@ define inreg <4 x double> @bitcast_v4i64_to_v4f64_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s17, s17, 0
; GFX11-NEXT: s_add_u32 s18, s18, 3
; GFX11-NEXT: s_addc_u32 s19, s19, 0
-; GFX11-NEXT: .LBB53_3: ; %end
+; GFX11-NEXT: .LBB53_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14839,17 +15046,19 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
-; SI-NEXT: .LBB55_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB55_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB55_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -14865,17 +15074,19 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14891,17 +15102,19 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14921,19 +15134,20 @@ define inreg <4 x i64> @bitcast_v4f64_to_v4i64_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -15098,6 +15312,7 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -15157,16 +15372,22 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v4i64_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s22, s22, 3
; VI-NEXT: s_addc_u32 s23, s23, 0
; VI-NEXT: s_add_u32 s20, s20, 3
@@ -15175,7 +15396,7 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -15185,17 +15406,19 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v4i64_to_v16i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_3
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s22, s22, 3
; GFX9-NEXT: s_addc_u32 s23, s23, 0
; GFX9-NEXT: s_add_u32 s20, s20, 3
@@ -15204,7 +15427,7 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB57_3: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -15214,19 +15437,20 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v4i64_to_v16i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s18, s18, 3
; GFX11-NEXT: s_addc_u32 s19, s19, 0
; GFX11-NEXT: s_add_u32 s16, s16, 3
@@ -15235,15 +15459,13 @@ define inreg <16 x i16> @bitcast_v4i64_to_v16i16_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB57_3: ; %end
+; GFX11-NEXT: .LBB57_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15461,8 +15683,9 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v8, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; SI-NEXT: s_cbranch_scc0 .LBB59_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -15548,16 +15771,22 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB59_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB59_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB59_2
+; SI-NEXT: s_branch .LBB59_3
;
; VI-LABEL: bitcast_v16i16_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_3
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s23, 3
; VI-NEXT: s_and_b32 s4, s23, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -15598,7 +15827,7 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB59_3: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -15608,17 +15837,19 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v16i16_to_v4i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -15628,8 +15859,6 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -15649,12 +15878,15 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -15664,8 +15896,6 @@ define inreg <4 x i64> @bitcast_v16i16_to_v4i64_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -15883,6 +16113,7 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB61_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s23, 16
@@ -15962,16 +16193,22 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB61_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB61_2
+; SI-NEXT: s_branch .LBB61_3
;
; VI-LABEL: bitcast_v4i64_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB61_3
-; VI-NEXT: .LBB61_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB61_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB61_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s22, s22, 3
; VI-NEXT: s_addc_u32 s23, s23, 0
; VI-NEXT: s_add_u32 s20, s20, 3
@@ -15980,7 +16217,7 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB61_3: ; %end
+; VI-NEXT: .LBB61_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -15990,17 +16227,19 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v4i64_to_v16f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB61_3
-; GFX9-NEXT: .LBB61_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB61_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s22, s22, 3
; GFX9-NEXT: s_addc_u32 s23, s23, 0
; GFX9-NEXT: s_add_u32 s20, s20, 3
@@ -16009,7 +16248,7 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB61_3: ; %end
+; GFX9-NEXT: .LBB61_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16019,19 +16258,20 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v4i64_to_v16f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB61_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
-; GFX11-NEXT: .LBB61_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s18, s18, 3
; GFX11-NEXT: s_addc_u32 s19, s19, 0
; GFX11-NEXT: s_add_u32 s16, s16, 3
@@ -16040,15 +16280,13 @@ define inreg <16 x half> @bitcast_v4i64_to_v16f16_scalar(<4 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB61_3: ; %end
+; GFX11-NEXT: .LBB61_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16319,6 +16557,7 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB63_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23
@@ -16407,16 +16646,22 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB63_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB63_2
+; SI-NEXT: s_branch .LBB63_3
;
; VI-LABEL: bitcast_v16f16_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
-; VI-NEXT: .LBB63_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB63_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB63_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -16459,8 +16704,6 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -16476,10 +16719,14 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
-; GFX9-NEXT: .LBB63_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB63_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -16490,8 +16737,6 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -16511,12 +16756,15 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB63_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -16526,8 +16774,6 @@ define inreg <4 x i64> @bitcast_v16f16_to_v4i64_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -16729,6 +16975,7 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB65_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s23, 0xffff0000
@@ -16808,16 +17055,22 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB65_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB65_2
+; SI-NEXT: s_branch .LBB65_3
;
; VI-LABEL: bitcast_v4i64_to_v16bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_3
-; VI-NEXT: .LBB65_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB65_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB65_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s22, s22, 3
; VI-NEXT: s_addc_u32 s23, s23, 0
; VI-NEXT: s_add_u32 s20, s20, 3
@@ -16826,7 +17079,7 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB65_3: ; %end
+; VI-NEXT: .LBB65_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -16836,17 +17089,19 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v4i64_to_v16bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_3
-; GFX9-NEXT: .LBB65_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB65_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s22, s22, 3
; GFX9-NEXT: s_addc_u32 s23, s23, 0
; GFX9-NEXT: s_add_u32 s20, s20, 3
@@ -16855,7 +17110,7 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB65_3: ; %end
+; GFX9-NEXT: .LBB65_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -16865,19 +17120,20 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v4i64_to_v16bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB65_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
-; GFX11-NEXT: .LBB65_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s18, s18, 3
; GFX11-NEXT: s_addc_u32 s19, s19, 0
; GFX11-NEXT: s_add_u32 s16, s16, 3
@@ -16886,15 +17142,13 @@ define inreg <16 x bfloat> @bitcast_v4i64_to_v16bf16_scalar(<4 x i64> inreg %a,
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB65_3: ; %end
+; GFX11-NEXT: .LBB65_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17660,6 +17914,7 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s28
; SI-NEXT: v_mul_f32_e32 v8, 1.0, v1
; SI-NEXT: v_mul_f32_e32 v9, 1.0, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB67_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v22
@@ -17732,16 +17987,22 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB67_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB67_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB67_2
+; SI-NEXT: s_branch .LBB67_3
;
; VI-LABEL: bitcast_v16bf16_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
-; VI-NEXT: .LBB67_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB67_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB67_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -17888,8 +18149,6 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v8, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -17905,10 +18164,14 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
-; GFX9-NEXT: .LBB67_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB67_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -18064,8 +18327,6 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: v_and_b32_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -18085,12 +18346,15 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB67_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
@@ -18266,8 +18530,6 @@ define inreg <4 x i64> @bitcast_v16bf16_to_v4i64_scalar(<16 x bfloat> inreg %a,
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
; GFX11-NEXT: .LBB67_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -18847,6 +19109,7 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB69_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -18962,12 +19225,15 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB69_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB69_2
+; SI-NEXT: s_branch .LBB69_3
;
; VI-LABEL: bitcast_v4i64_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[12:13], -1
; VI-NEXT: s_cbranch_scc0 .LBB69_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
@@ -19087,12 +19353,15 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr24
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB69_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; VI-NEXT: s_cbranch_vccz .LBB69_2
+; VI-NEXT: s_branch .LBB69_3
;
; GFX9-LABEL: bitcast_v4i64_to_v32i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[12:13], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
@@ -19212,15 +19481,18 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr24
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB69_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GFX9-NEXT: s_cbranch_vccz .LBB69_2
+; GFX9-NEXT: s_branch .LBB69_3
;
; GFX11-LABEL: bitcast_v4i64_to_v32i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s46, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[18:19], 24
; GFX11-NEXT: s_lshr_b32 s12, s19, 24
; GFX11-NEXT: s_lshr_b32 s13, s19, 16
; GFX11-NEXT: s_lshr_b32 s14, s19, 8
@@ -19241,12 +19513,10 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s43, s1, 8
; GFX11-NEXT: s_lshr_b32 s44, s0, 16
; GFX11-NEXT: s_lshr_b32 s45, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB69_3
+; GFX11-NEXT: s_cbranch_execnz .LBB69_3
; GFX11-NEXT: .LBB69_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
@@ -19324,7 +19594,9 @@ define inreg <32 x i8> @bitcast_v4i64_to_v32i8_scalar(<4 x i64> inreg %a, i32 in
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB69_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB69_2
+; GFX11-NEXT: s_branch .LBB69_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20362,11 +20634,12 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v22, v6
; SI-NEXT: v_mov_b32_e32 v21, v4
; SI-NEXT: v_mov_b32_e32 v20, v2
; SI-NEXT: v_mov_b32_e32 v19, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v5
@@ -20548,17 +20821,20 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB71_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB71_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB71_2
+; SI-NEXT: s_branch .LBB71_3
;
; VI-LABEL: bitcast_v32i8_to_v4i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, v6
; VI-NEXT: v_mov_b32_e32 v21, v4
; VI-NEXT: v_mov_b32_e32 v20, v2
; VI-NEXT: v_mov_b32_e32 v19, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -20704,17 +20980,20 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB71_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB71_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB71_2
+; VI-NEXT: s_branch .LBB71_3
;
; GFX9-LABEL: bitcast_v32i8_to_v4i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, v6
; GFX9-NEXT: v_mov_b32_e32 v21, v4
; GFX9-NEXT: v_mov_b32_e32 v20, v2
; GFX9-NEXT: v_mov_b32_e32 v19, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -20861,7 +21140,9 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB71_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB71_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB71_2
+; GFX9-NEXT: s_branch .LBB71_3
;
; GFX11-LABEL: bitcast_v32i8_to_v4i64_scalar:
; GFX11: ; %bb.0:
@@ -20876,44 +21157,44 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
-; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
-; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s23, 8
; GFX11-NEXT: v_or_b32_e32 v1, v1, v21
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
; GFX11-NEXT: v_or_b32_e32 v2, v2, v14
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v18
@@ -20921,35 +21202,34 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v12
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
+; GFX11-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
; GFX11-NEXT: v_or_b32_e32 v3, v3, v19
; GFX11-NEXT: v_or_b32_e32 v4, v4, v9
; GFX11-NEXT: v_or_b32_e32 v6, v6, v13
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v5
; GFX11-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: v_mov_b32_e32 v1, s5
; GFX11-NEXT: v_or_b32_e32 v0, v0, v20
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v4
; GFX11-NEXT: v_lshlrev_b32_e32 v23, 16, v6
-; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_or_b32_e32 v6, v3, v7
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_or_b32_e32 v7, v22, v23
-; GFX11-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
+; GFX11-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB71_3
; GFX11-NEXT: .LBB71_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -21056,7 +21336,9 @@ define inreg <4 x i64> @bitcast_v32i8_to_v4i64_scalar(<32 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB71_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB71_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB71_2
+; GFX11-NEXT: s_branch .LBB71_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21207,6 +21489,7 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB73_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -21245,7 +21528,8 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB73_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB73_2
; SI-NEXT: .LBB73_4:
; SI-NEXT: v_mov_b32_e32 v23, s17
; SI-NEXT: v_mov_b32_e32 v21, s19
@@ -21274,17 +21558,19 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
-; VI-NEXT: .LBB73_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB73_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB73_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -21300,17 +21586,19 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
-; GFX9-NEXT: .LBB73_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB73_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -21330,19 +21618,20 @@ define inreg <16 x i16> @bitcast_v4f64_to_v16i16_scalar(<4 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB73_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
-; GFX11-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -21566,8 +21855,9 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_mov_b32_e32 v8, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v8, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v1
; SI-NEXT: s_cbranch_scc0 .LBB75_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -21653,16 +21943,22 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB75_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB75_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB75_2
+; SI-NEXT: s_branch .LBB75_3
;
; VI-LABEL: bitcast_v16i16_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB75_3
-; VI-NEXT: .LBB75_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB75_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB75_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s23, 3
; VI-NEXT: s_and_b32 s4, s23, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -21703,7 +21999,7 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB75_3: ; %end
+; VI-NEXT: .LBB75_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -21713,17 +22009,19 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v16i16_to_v4f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
-; GFX9-NEXT: .LBB75_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB75_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -21733,8 +22031,6 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: s_branch .LBB75_2
; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -21754,12 +22050,15 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB75_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
-; GFX11-NEXT: .LBB75_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -21769,8 +22068,6 @@ define inreg <4 x double> @bitcast_v16i16_to_v4f64_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -21964,6 +22261,7 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB77_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s23, 16
@@ -22039,23 +22337,27 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB77_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB77_2
+; SI-NEXT: s_branch .LBB77_3
;
; VI-LABEL: bitcast_v4f64_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
-; VI-NEXT: .LBB77_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB77_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB77_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -22071,17 +22373,19 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
-; GFX9-NEXT: .LBB77_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB77_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -22101,19 +22405,20 @@ define inreg <16 x half> @bitcast_v4f64_to_v16f16_scalar(<4 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB77_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
-; GFX11-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -22390,6 +22695,7 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v8, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB79_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v23
@@ -22478,16 +22784,22 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB79_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB79_2
+; SI-NEXT: s_branch .LBB79_3
;
; VI-LABEL: bitcast_v16f16_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
-; VI-NEXT: .LBB79_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB79_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB79_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -22530,8 +22842,6 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v8
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -22547,10 +22857,14 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
-; GFX9-NEXT: .LBB79_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB79_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -22561,8 +22875,6 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -22582,12 +22894,15 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB79_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
-; GFX11-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -22597,8 +22912,6 @@ define inreg <4 x double> @bitcast_v16f16_to_v4f64_scalar(<16 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -22776,6 +23089,7 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB81_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s29, s23, 0xffff0000
@@ -22834,7 +23148,8 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; SI-NEXT: ; implicit-def: $sgpr27
; SI-NEXT: ; implicit-def: $sgpr28
; SI-NEXT: ; implicit-def: $sgpr29
-; SI-NEXT: s_branch .LBB81_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB81_2
; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v15, s29
; SI-NEXT: v_mov_b32_e32 v14, s28
@@ -22858,17 +23173,19 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
-; VI-NEXT: .LBB81_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB81_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB81_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -22884,17 +23201,19 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
-; GFX9-NEXT: .LBB81_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB81_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -22914,19 +23233,20 @@ define inreg <16 x bfloat> @bitcast_v4f64_to_v16bf16_scalar(<4 x double> inreg %
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB81_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
-; GFX11-NEXT: .LBB81_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], s[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -23698,6 +24018,7 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s28
; SI-NEXT: v_mul_f32_e32 v8, 1.0, v1
; SI-NEXT: v_mul_f32_e32 v9, 1.0, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB83_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v22
@@ -23770,16 +24091,22 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB83_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB83_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB83_2
+; SI-NEXT: s_branch .LBB83_3
;
; VI-LABEL: bitcast_v16bf16_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
-; VI-NEXT: .LBB83_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB83_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB83_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -23926,8 +24253,6 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v8, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -23943,10 +24268,14 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
-; GFX9-NEXT: .LBB83_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB83_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s23, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -24102,8 +24431,6 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_and_b32_sdwa v0, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v9, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -24123,12 +24450,15 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB83_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s8, s7, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
@@ -24304,8 +24634,6 @@ define inreg <4 x double> @bitcast_v16bf16_to_v4f64_scalar(<16 x bfloat> inreg %
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_lshl_or_b32 v0, v12, 16, v9
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
; GFX11-NEXT: .LBB83_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -24869,6 +25197,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB85_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s22
@@ -24955,7 +25284,8 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr15
; SI-NEXT: ; implicit-def: $sgpr24
; SI-NEXT: ; implicit-def: $sgpr25
-; SI-NEXT: s_branch .LBB85_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB85_2
; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v9, s19
@@ -24992,6 +25322,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[12:13], -1
; VI-NEXT: s_cbranch_scc0 .LBB85_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s43, s23, 24
@@ -25074,7 +25405,8 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr41
; VI-NEXT: ; implicit-def: $sgpr42
; VI-NEXT: ; implicit-def: $sgpr43
-; VI-NEXT: s_branch .LBB85_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; VI-NEXT: s_cbranch_vccz .LBB85_2
; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v8, s18
@@ -25123,6 +25455,7 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[12:13], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s43, s23, 24
@@ -25205,7 +25538,8 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr41
; GFX9-NEXT: ; implicit-def: $sgpr42
; GFX9-NEXT: ; implicit-def: $sgpr43
-; GFX9-NEXT: s_branch .LBB85_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GFX9-NEXT: s_cbranch_vccz .LBB85_2
; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v8, s18
@@ -25254,35 +25588,34 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s12, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s28, s19, 24
-; GFX11-NEXT: s_lshr_b32 s27, s19, 16
-; GFX11-NEXT: s_lshr_b32 s26, s19, 8
-; GFX11-NEXT: s_lshr_b32 s40, s18, 16
-; GFX11-NEXT: s_lshr_b32 s29, s18, 8
-; GFX11-NEXT: s_lshr_b32 s25, s17, 24
-; GFX11-NEXT: s_lshr_b32 s24, s17, 16
-; GFX11-NEXT: s_lshr_b32 s23, s17, 8
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: s_lshr_b32 s41, s16, 8
-; GFX11-NEXT: s_lshr_b32 s22, s3, 24
-; GFX11-NEXT: s_lshr_b32 s21, s3, 16
-; GFX11-NEXT: s_lshr_b32 s20, s3, 8
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s43, s2, 8
-; GFX11-NEXT: s_lshr_b32 s15, s1, 24
-; GFX11-NEXT: s_lshr_b32 s14, s1, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 8
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_lshr_b32 s45, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s27, s19, 24
+; GFX11-NEXT: s_lshr_b32 s26, s19, 16
+; GFX11-NEXT: s_lshr_b32 s25, s19, 8
+; GFX11-NEXT: s_lshr_b32 s29, s18, 16
+; GFX11-NEXT: s_lshr_b32 s28, s18, 8
+; GFX11-NEXT: s_lshr_b32 s24, s17, 24
+; GFX11-NEXT: s_lshr_b32 s23, s17, 16
+; GFX11-NEXT: s_lshr_b32 s22, s17, 8
+; GFX11-NEXT: s_lshr_b32 s41, s16, 16
+; GFX11-NEXT: s_lshr_b32 s40, s16, 8
+; GFX11-NEXT: s_lshr_b32 s21, s3, 24
+; GFX11-NEXT: s_lshr_b32 s20, s3, 16
+; GFX11-NEXT: s_lshr_b32 s15, s3, 8
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s42, s2, 8
+; GFX11-NEXT: s_lshr_b32 s14, s1, 24
+; GFX11-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-NEXT: s_lshr_b32 s12, s1, 8
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_lshr_b32 s44, s0, 8
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_cbranch_execnz .LBB85_4
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[32:33], s[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[34:35], s[16:17], 1.0
@@ -25316,52 +25649,53 @@ define inreg <32 x i8> @bitcast_v4f64_to_v32i8_scalar(<4 x double> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
; GFX11-NEXT: s_branch .LBB85_5
; GFX11-NEXT: .LBB85_3:
+; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr15
+; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr20
; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr22
+; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr22
; GFX11-NEXT: ; implicit-def: $sgpr23
; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr25
+; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr25
; GFX11-NEXT: ; implicit-def: $sgpr26
; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: s_branch .LBB85_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB85_2
; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v33, s19
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v35, s17
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v37, s3
; GFX11-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v39, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s46 :: v_dual_mov_b32 v1, s45
-; GFX11-NEXT: v_dual_mov_b32 v10, s44 :: v_dual_mov_b32 v9, s43
-; GFX11-NEXT: v_dual_mov_b32 v18, s42 :: v_dual_mov_b32 v17, s41
-; GFX11-NEXT: v_dual_mov_b32 v26, s40 :: v_dual_mov_b32 v25, s29
-; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v30, s27
-; GFX11-NEXT: v_dual_mov_b32 v11, s6 :: v_dual_mov_b32 v22, s24
-; GFX11-NEXT: v_dual_mov_b32 v19, s8 :: v_dual_mov_b32 v14, s21
-; GFX11-NEXT: v_dual_mov_b32 v27, s10 :: v_dual_mov_b32 v6, s14
-; GFX11-NEXT: v_mov_b32_e32 v31, s28
-; GFX11-NEXT: v_mov_b32_e32 v29, s26
-; GFX11-NEXT: v_mov_b32_e32 v23, s25
-; GFX11-NEXT: v_mov_b32_e32 v21, s23
-; GFX11-NEXT: v_mov_b32_e32 v15, s22
-; GFX11-NEXT: v_mov_b32_e32 v13, s20
-; GFX11-NEXT: v_mov_b32_e32 v7, s15
-; GFX11-NEXT: v_mov_b32_e32 v5, s13
+; GFX11-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v1, s44
+; GFX11-NEXT: v_dual_mov_b32 v10, s43 :: v_dual_mov_b32 v9, s42
+; GFX11-NEXT: v_dual_mov_b32 v18, s41 :: v_dual_mov_b32 v17, s40
+; GFX11-NEXT: v_dual_mov_b32 v26, s29 :: v_dual_mov_b32 v25, s28
+; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v30, s26
+; GFX11-NEXT: v_dual_mov_b32 v11, s6 :: v_dual_mov_b32 v22, s23
+; GFX11-NEXT: v_dual_mov_b32 v19, s8 :: v_dual_mov_b32 v14, s20
+; GFX11-NEXT: v_dual_mov_b32 v27, s10 :: v_dual_mov_b32 v6, s13
+; GFX11-NEXT: v_mov_b32_e32 v31, s27
+; GFX11-NEXT: v_mov_b32_e32 v29, s25
+; GFX11-NEXT: v_mov_b32_e32 v23, s24
+; GFX11-NEXT: v_mov_b32_e32 v21, s22
+; GFX11-NEXT: v_mov_b32_e32 v15, s21
+; GFX11-NEXT: v_mov_b32_e32 v13, s15
+; GFX11-NEXT: v_mov_b32_e32 v7, s14
+; GFX11-NEXT: v_mov_b32_e32 v5, s12
; GFX11-NEXT: .LBB85_5: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v38
; GFX11-NEXT: v_mov_b32_e32 v4, v39
@@ -26409,11 +26743,12 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v22, v6
; SI-NEXT: v_mov_b32_e32 v21, v4
; SI-NEXT: v_mov_b32_e32 v20, v2
; SI-NEXT: v_mov_b32_e32 v19, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v5
@@ -26595,17 +26930,20 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB87_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; SI-NEXT: s_branch .LBB87_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB87_2
+; SI-NEXT: s_branch .LBB87_3
;
; VI-LABEL: bitcast_v32i8_to_v4f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, v6
; VI-NEXT: v_mov_b32_e32 v21, v4
; VI-NEXT: v_mov_b32_e32 v20, v2
; VI-NEXT: v_mov_b32_e32 v19, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -26751,17 +27089,20 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB87_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB87_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB87_2
+; VI-NEXT: s_branch .LBB87_3
;
; GFX9-LABEL: bitcast_v32i8_to_v4f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, v6
; GFX9-NEXT: v_mov_b32_e32 v21, v4
; GFX9-NEXT: v_mov_b32_e32 v20, v2
; GFX9-NEXT: v_mov_b32_e32 v19, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v27, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v5
@@ -26908,7 +27249,9 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB87_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB87_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB87_2
+; GFX9-NEXT: s_branch .LBB87_3
;
; GFX11-LABEL: bitcast_v32i8_to_v4f64_scalar:
; GFX11: ; %bb.0:
@@ -26923,44 +27266,44 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB87_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
+; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v16
-; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v17
-; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
-; GFX11-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_or_b32 s6, s6, s7
-; GFX11-NEXT: s_or_b32 s7, s8, s9
-; GFX11-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s23, 8
; GFX11-NEXT: v_or_b32_e32 v1, v1, v21
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
; GFX11-NEXT: v_or_b32_e32 v2, v2, v14
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v18
@@ -26968,35 +27311,34 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v12
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
+; GFX11-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
; GFX11-NEXT: v_or_b32_e32 v3, v3, v19
; GFX11-NEXT: v_or_b32_e32 v4, v4, v9
; GFX11-NEXT: v_or_b32_e32 v6, v6, v13
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v5
; GFX11-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: v_mov_b32_e32 v1, s5
; GFX11-NEXT: v_or_b32_e32 v0, v0, v20
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_lshlrev_b32_e32 v7, 16, v4
; GFX11-NEXT: v_lshlrev_b32_e32 v23, 16, v6
-; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_or_b32_e32 v6, v3, v7
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_or_b32_e32 v7, v22, v23
-; GFX11-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB87_3
+; GFX11-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
+; GFX11-NEXT: s_cbranch_execnz .LBB87_3
; GFX11-NEXT: .LBB87_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -27103,7 +27445,9 @@ define inreg <4 x double> @bitcast_v32i8_to_v4f64_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB87_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB87_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB87_2
+; GFX11-NEXT: s_branch .LBB87_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27339,9 +27683,10 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v1
; SI-NEXT: v_mov_b32_e32 v17, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB89_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -27413,16 +27758,22 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB89_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB89_2
+; SI-NEXT: s_branch .LBB89_3
;
; VI-LABEL: bitcast_v16i16_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB89_3
-; VI-NEXT: .LBB89_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB89_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB89_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
; VI-NEXT: s_add_i32 s7, s17, 3
@@ -27463,7 +27814,7 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB89_3: ; %end
+; VI-NEXT: .LBB89_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -27473,17 +27824,19 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v16i16_to_v16f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_4
-; GFX9-NEXT: .LBB89_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB89_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -27493,8 +27846,6 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB89_3:
-; GFX9-NEXT: s_branch .LBB89_2
; GFX9-NEXT: .LBB89_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -27514,12 +27865,15 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB89_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
-; GFX11-NEXT: .LBB89_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -27529,8 +27883,6 @@ define inreg <16 x half> @bitcast_v16i16_to_v16f16_scalar(<16 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -27774,10 +28126,14 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB91_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_3
-; SI-NEXT: .LBB91_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB91_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB91_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_cvt_f32_f16_e32 v14, v14
; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
@@ -27846,19 +28202,21 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
-; SI-NEXT: .LBB91_3: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB91_4:
-; SI-NEXT: s_branch .LBB91_2
;
; VI-LABEL: bitcast_v16f16_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_4
-; VI-NEXT: .LBB91_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB91_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB91_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -27901,8 +28259,6 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v1, v10
; VI-NEXT: v_or_b32_e32 v0, v8, v9
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB91_3:
-; VI-NEXT: s_branch .LBB91_2
; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -27918,10 +28274,14 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
-; GFX9-NEXT: .LBB91_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB91_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -27932,8 +28292,6 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: s_branch .LBB91_2
; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -27953,12 +28311,15 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB91_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
-; GFX11-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -27968,8 +28329,6 @@ define inreg <16 x i16> @bitcast_v16f16_to_v16i16_scalar(<16 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -28205,6 +28564,7 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1
; SI-NEXT: s_cbranch_scc0 .LBB93_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -28312,16 +28672,22 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; SI-NEXT: ; implicit-def: $sgpr42
; SI-NEXT: ; implicit-def: $sgpr43
; SI-NEXT: ; implicit-def: $vgpr14
-; SI-NEXT: s_branch .LBB93_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB93_2
+; SI-NEXT: s_branch .LBB93_3
;
; VI-LABEL: bitcast_v16i16_to_v16bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB93_3
-; VI-NEXT: .LBB93_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB93_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB93_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
; VI-NEXT: s_add_i32 s7, s17, 3
@@ -28362,7 +28728,7 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB93_3: ; %end
+; VI-NEXT: .LBB93_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -28372,17 +28738,19 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v16i16_to_v16bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_4
-; GFX9-NEXT: .LBB93_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB93_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v5, s21, 3 op_sel_hi:[1,0]
@@ -28392,8 +28760,6 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB93_3:
-; GFX9-NEXT: s_branch .LBB93_2
; GFX9-NEXT: .LBB93_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -28413,12 +28779,15 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB93_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
-; GFX11-NEXT: .LBB93_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
@@ -28428,8 +28797,6 @@ define inreg <16 x bfloat> @bitcast_v16i16_to_v16bf16_scalar(<16 x i16> inreg %a
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -29237,6 +29604,7 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e64 v24, 1.0, s29
; SI-NEXT: v_mul_f32_e32 v23, 1.0, v0
; SI-NEXT: v_mul_f32_e32 v22, 1.0, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB95_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v31
@@ -29332,16 +29700,22 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB95_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB95_2
+; SI-NEXT: s_branch .LBB95_3
;
; VI-LABEL: bitcast_v16bf16_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
-; VI-NEXT: .LBB95_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB95_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB95_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v1
@@ -29488,8 +29862,6 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; VI-NEXT: v_alignbit_b32 v1, v15, v8, 16
; VI-NEXT: v_alignbit_b32 v0, v14, v0, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -29505,10 +29877,14 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
-; GFX9-NEXT: .LBB95_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB95_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v1
@@ -29656,8 +30032,6 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX9-NEXT: v_and_or_b32 v1, v9, v13, v1
; GFX9-NEXT: v_and_or_b32 v0, v0, v13, v8
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -29677,12 +30051,15 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB95_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB95_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s8, s0, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
@@ -29830,8 +30207,6 @@ define inreg <16 x i16> @bitcast_v16bf16_to_v16i16_scalar(<16 x bfloat> inreg %a
; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v9, v14
; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v15
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
; GFX11-NEXT: .LBB95_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -30518,9 +30893,10 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; SI-LABEL: bitcast_v16i16_to_v32i8_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v4, v1
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SI-NEXT: v_mov_b32_e32 v4, v1
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v4
; SI-NEXT: s_cbranch_scc0 .LBB97_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -30694,12 +31070,15 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB97_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB97_2
+; SI-NEXT: s_branch .LBB97_3
;
; VI-LABEL: bitcast_v16i16_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[12:13], -1
; VI-NEXT: s_cbranch_scc0 .LBB97_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
@@ -30851,12 +31230,15 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr24
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB97_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; VI-NEXT: s_cbranch_vccz .LBB97_2
+; VI-NEXT: s_branch .LBB97_3
;
; GFX9-LABEL: bitcast_v16i16_to_v32i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[12:13], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB97_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
@@ -30943,7 +31325,8 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr25
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB97_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GFX9-NEXT: s_cbranch_vccz .LBB97_2
; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -30992,35 +31375,34 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s12, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s13, s19, 24
-; GFX11-NEXT: s_lshr_b32 s14, s19, 16
-; GFX11-NEXT: s_lshr_b32 s20, s19, 8
-; GFX11-NEXT: s_lshr_b32 s15, s18, 16
-; GFX11-NEXT: s_lshr_b32 s21, s18, 8
-; GFX11-NEXT: s_lshr_b32 s22, s17, 24
-; GFX11-NEXT: s_lshr_b32 s23, s17, 16
-; GFX11-NEXT: s_lshr_b32 s25, s17, 8
-; GFX11-NEXT: s_lshr_b32 s24, s16, 16
-; GFX11-NEXT: s_lshr_b32 s26, s16, 8
-; GFX11-NEXT: s_lshr_b32 s27, s3, 24
-; GFX11-NEXT: s_lshr_b32 s28, s3, 16
-; GFX11-NEXT: s_lshr_b32 s40, s3, 8
-; GFX11-NEXT: s_lshr_b32 s29, s2, 16
-; GFX11-NEXT: s_lshr_b32 s41, s2, 8
-; GFX11-NEXT: s_lshr_b32 s42, s1, 24
-; GFX11-NEXT: s_lshr_b32 s43, s1, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 8
-; GFX11-NEXT: s_lshr_b32 s44, s0, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s12, s19, 24
+; GFX11-NEXT: s_lshr_b32 s13, s19, 16
+; GFX11-NEXT: s_lshr_b32 s15, s19, 8
+; GFX11-NEXT: s_lshr_b32 s14, s18, 16
+; GFX11-NEXT: s_lshr_b32 s20, s18, 8
+; GFX11-NEXT: s_lshr_b32 s21, s17, 24
+; GFX11-NEXT: s_lshr_b32 s22, s17, 16
+; GFX11-NEXT: s_lshr_b32 s24, s17, 8
+; GFX11-NEXT: s_lshr_b32 s23, s16, 16
+; GFX11-NEXT: s_lshr_b32 s25, s16, 8
+; GFX11-NEXT: s_lshr_b32 s26, s3, 24
+; GFX11-NEXT: s_lshr_b32 s27, s3, 16
+; GFX11-NEXT: s_lshr_b32 s29, s3, 8
+; GFX11-NEXT: s_lshr_b32 s28, s2, 16
+; GFX11-NEXT: s_lshr_b32 s40, s2, 8
+; GFX11-NEXT: s_lshr_b32 s41, s1, 24
+; GFX11-NEXT: s_lshr_b32 s42, s1, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 8
+; GFX11-NEXT: s_lshr_b32 s43, s0, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 8
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_cbranch_execnz .LBB97_4
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v39, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
@@ -31056,48 +31438,49 @@ define inreg <32 x i8> @bitcast_v16i16_to_v32i8_scalar(<16 x i16> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
; GFX11-NEXT: s_branch .LBB97_5
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr28
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr29
; GFX11-NEXT: ; implicit-def: $sgpr27
; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr25
; GFX11-NEXT: ; implicit-def: $sgpr23
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr24
; GFX11-NEXT: ; implicit-def: $sgpr22
; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr20
; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: s_branch .LBB97_2
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB97_2
; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v39, s1
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v37, s3
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v35, s17
; GFX11-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v33, s19
-; GFX11-NEXT: v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v2, s44
-; GFX11-NEXT: v_dual_mov_b32 v5, s45 :: v_dual_mov_b32 v6, s43
-; GFX11-NEXT: v_dual_mov_b32 v7, s42 :: v_dual_mov_b32 v10, s29
-; GFX11-NEXT: v_dual_mov_b32 v9, s41 :: v_dual_mov_b32 v14, s28
-; GFX11-NEXT: v_dual_mov_b32 v13, s40 :: v_dual_mov_b32 v18, s24
-; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v22, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s26 :: v_dual_mov_b32 v26, s15
-; GFX11-NEXT: v_dual_mov_b32 v21, s25 :: v_dual_mov_b32 v30, s14
-; GFX11-NEXT: v_mov_b32_e32 v23, s22
-; GFX11-NEXT: v_mov_b32_e32 v25, s21
-; GFX11-NEXT: v_mov_b32_e32 v29, s20
-; GFX11-NEXT: v_mov_b32_e32 v31, s13
+; GFX11-NEXT: v_dual_mov_b32 v1, s45 :: v_dual_mov_b32 v2, s43
+; GFX11-NEXT: v_dual_mov_b32 v5, s44 :: v_dual_mov_b32 v6, s42
+; GFX11-NEXT: v_dual_mov_b32 v7, s41 :: v_dual_mov_b32 v10, s28
+; GFX11-NEXT: v_dual_mov_b32 v9, s40 :: v_dual_mov_b32 v14, s27
+; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v18, s23
+; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v22, s22
+; GFX11-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v26, s14
+; GFX11-NEXT: v_dual_mov_b32 v21, s24 :: v_dual_mov_b32 v30, s13
+; GFX11-NEXT: v_mov_b32_e32 v23, s21
+; GFX11-NEXT: v_mov_b32_e32 v25, s20
+; GFX11-NEXT: v_mov_b32_e32 v29, s15
+; GFX11-NEXT: v_mov_b32_e32 v31, s12
; GFX11-NEXT: v_mov_b32_e32 v27, s10
; GFX11-NEXT: v_mov_b32_e32 v19, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
@@ -32091,6 +32474,7 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v14
; SI-NEXT: v_mov_b32_e32 v20, v12
; SI-NEXT: v_readfirstlane_b32 s13, v11
@@ -32099,7 +32483,7 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s10, v2
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5
@@ -32320,17 +32704,20 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB99_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB99_2
+; SI-NEXT: s_branch .LBB99_3
;
; VI-LABEL: bitcast_v32i8_to_v16i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v6
; VI-NEXT: v_mov_b32_e32 v20, v4
; VI-NEXT: v_mov_b32_e32 v22, v2
; VI-NEXT: v_mov_b32_e32 v19, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v5
@@ -32476,17 +32863,20 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB99_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB99_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB99_2
+; VI-NEXT: s_branch .LBB99_3
;
; GFX9-LABEL: bitcast_v32i8_to_v16i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v22, v4
; GFX9-NEXT: v_mov_b32_e32 v21, v2
; GFX9-NEXT: v_mov_b32_e32 v19, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v5
@@ -32630,7 +33020,9 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB99_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB99_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB99_2
+; GFX9-NEXT: s_branch .LBB99_3
;
; GFX11-LABEL: bitcast_v32i8_to_v16i16_scalar:
; GFX11: ; %bb.0:
@@ -32645,42 +33037,42 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB99_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
+; GFX11-NEXT: s_or_b32 s8, s8, s9
; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
@@ -32689,25 +33081,24 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s9
; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: v_mov_b32_e32 v1, s5
; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-NEXT: v_mov_b32_e32 v2, s6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB99_3
; GFX11-NEXT: .LBB99_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s28, s28, 3
; GFX11-NEXT: s_lshl_b32 s5, s29, 8
@@ -32802,7 +33193,9 @@ define inreg <16 x i16> @bitcast_v32i8_to_v16i16_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB99_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB99_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB99_2
+; GFX11-NEXT: s_branch .LBB99_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -33088,6 +33481,7 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v31, v1
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB101_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v16
@@ -33191,16 +33585,22 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB101_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB101_2
+; SI-NEXT: s_branch .LBB101_3
;
; VI-LABEL: bitcast_v16f16_to_v16bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
-; VI-NEXT: .LBB101_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB101_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB101_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -33243,8 +33643,6 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; VI-NEXT: v_or_b32_e32 v1, v1, v10
; VI-NEXT: v_or_b32_e32 v0, v8, v9
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -33260,10 +33658,14 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
-; GFX9-NEXT: .LBB101_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB101_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v6, s22, v0 op_sel_hi:[1,0]
@@ -33274,8 +33676,6 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -33295,12 +33695,15 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB101_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
-; GFX11-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
@@ -33310,8 +33713,6 @@ define inreg <16 x bfloat> @bitcast_v16f16_to_v16bf16_scalar(<16 x half> inreg %
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -34134,6 +34535,7 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e64 v29, 1.0, s29
; SI-NEXT: v_mul_f32_e32 v30, 1.0, v0
; SI-NEXT: v_mul_f32_e32 v31, 1.0, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB103_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v16
@@ -34253,16 +34655,22 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB103_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB103_2
+; SI-NEXT: s_branch .LBB103_3
;
; VI-LABEL: bitcast_v16bf16_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
-; VI-NEXT: .LBB103_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB103_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB103_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v1
@@ -34409,8 +34817,6 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; VI-NEXT: v_alignbit_b32 v1, v15, v8, 16
; VI-NEXT: v_alignbit_b32 v0, v14, v0, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -34426,10 +34832,14 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
-; GFX9-NEXT: .LBB103_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB103_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v1
@@ -34585,8 +34995,6 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v8
; GFX9-NEXT: v_lshl_or_b32 v0, v16, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -34606,12 +35014,15 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX11-NEXT: s_mov_b32 s5, s17
; GFX11-NEXT: s_mov_b32 s4, s16
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB103_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s8, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB103_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s8, s0, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s8
@@ -34776,8 +35187,6 @@ define inreg <16 x half> @bitcast_v16bf16_to_v16f16_scalar(<16 x bfloat> inreg %
; GFX11-NEXT: v_lshl_or_b32 v1, v8, 16, v13
; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v14
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
; GFX11-NEXT: .LBB103_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -35472,6 +35881,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v49, v0
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB105_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37
@@ -35627,12 +36037,15 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB105_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB105_2
+; SI-NEXT: s_branch .LBB105_3
;
; VI-LABEL: bitcast_v16f16_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[12:13], -1
; VI-NEXT: s_cbranch_scc0 .LBB105_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s40, s23, 24
@@ -35744,7 +36157,8 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr25
; VI-NEXT: ; implicit-def: $sgpr44
; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB105_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; VI-NEXT: s_cbranch_vccz .LBB105_2
; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v2, s59
; VI-NEXT: v_mov_b32_e32 v6, s58
@@ -35789,6 +36203,7 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[12:13], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s14, s23, 24
@@ -35876,7 +36291,8 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr25
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB105_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GFX9-NEXT: s_cbranch_vccz .LBB105_2
; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -35925,35 +36341,34 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s12, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s13, s19, 24
-; GFX11-NEXT: s_lshr_b32 s14, s19, 16
-; GFX11-NEXT: s_lshr_b32 s20, s19, 8
-; GFX11-NEXT: s_lshr_b32 s15, s18, 16
-; GFX11-NEXT: s_lshr_b32 s21, s18, 8
-; GFX11-NEXT: s_lshr_b32 s22, s17, 24
-; GFX11-NEXT: s_lshr_b32 s23, s17, 16
-; GFX11-NEXT: s_lshr_b32 s25, s17, 8
-; GFX11-NEXT: s_lshr_b32 s24, s16, 16
-; GFX11-NEXT: s_lshr_b32 s26, s16, 8
-; GFX11-NEXT: s_lshr_b32 s27, s3, 24
-; GFX11-NEXT: s_lshr_b32 s28, s3, 16
-; GFX11-NEXT: s_lshr_b32 s40, s3, 8
-; GFX11-NEXT: s_lshr_b32 s29, s2, 16
-; GFX11-NEXT: s_lshr_b32 s41, s2, 8
-; GFX11-NEXT: s_lshr_b32 s42, s1, 24
-; GFX11-NEXT: s_lshr_b32 s43, s1, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 8
-; GFX11-NEXT: s_lshr_b32 s44, s0, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s12, s19, 24
+; GFX11-NEXT: s_lshr_b32 s13, s19, 16
+; GFX11-NEXT: s_lshr_b32 s15, s19, 8
+; GFX11-NEXT: s_lshr_b32 s14, s18, 16
+; GFX11-NEXT: s_lshr_b32 s20, s18, 8
+; GFX11-NEXT: s_lshr_b32 s21, s17, 24
+; GFX11-NEXT: s_lshr_b32 s22, s17, 16
+; GFX11-NEXT: s_lshr_b32 s24, s17, 8
+; GFX11-NEXT: s_lshr_b32 s23, s16, 16
+; GFX11-NEXT: s_lshr_b32 s25, s16, 8
+; GFX11-NEXT: s_lshr_b32 s26, s3, 24
+; GFX11-NEXT: s_lshr_b32 s27, s3, 16
+; GFX11-NEXT: s_lshr_b32 s29, s3, 8
+; GFX11-NEXT: s_lshr_b32 s28, s2, 16
+; GFX11-NEXT: s_lshr_b32 s40, s2, 8
+; GFX11-NEXT: s_lshr_b32 s41, s1, 24
+; GFX11-NEXT: s_lshr_b32 s42, s1, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 8
+; GFX11-NEXT: s_lshr_b32 s43, s0, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 8
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_cbranch_execnz .LBB105_4
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v39, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
@@ -35989,48 +36404,49 @@ define inreg <32 x i8> @bitcast_v16f16_to_v32i8_scalar(<16 x half> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v38
; GFX11-NEXT: s_branch .LBB105_5
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr29
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr28
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr29
; GFX11-NEXT: ; implicit-def: $sgpr27
; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr25
; GFX11-NEXT: ; implicit-def: $sgpr23
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr24
; GFX11-NEXT: ; implicit-def: $sgpr22
; GFX11-NEXT: ; implicit-def: $sgpr21
-; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr20
; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr13
-; GFX11-NEXT: s_branch .LBB105_2
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB105_2
; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v38, s0 :: v_dual_mov_b32 v39, s1
; GFX11-NEXT: v_dual_mov_b32 v36, s2 :: v_dual_mov_b32 v37, s3
; GFX11-NEXT: v_dual_mov_b32 v34, s16 :: v_dual_mov_b32 v35, s17
; GFX11-NEXT: v_dual_mov_b32 v32, s18 :: v_dual_mov_b32 v33, s19
-; GFX11-NEXT: v_dual_mov_b32 v1, s46 :: v_dual_mov_b32 v2, s44
-; GFX11-NEXT: v_dual_mov_b32 v5, s45 :: v_dual_mov_b32 v6, s43
-; GFX11-NEXT: v_dual_mov_b32 v7, s42 :: v_dual_mov_b32 v10, s29
-; GFX11-NEXT: v_dual_mov_b32 v9, s41 :: v_dual_mov_b32 v14, s28
-; GFX11-NEXT: v_dual_mov_b32 v13, s40 :: v_dual_mov_b32 v18, s24
-; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v22, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s26 :: v_dual_mov_b32 v26, s15
-; GFX11-NEXT: v_dual_mov_b32 v21, s25 :: v_dual_mov_b32 v30, s14
-; GFX11-NEXT: v_mov_b32_e32 v23, s22
-; GFX11-NEXT: v_mov_b32_e32 v25, s21
-; GFX11-NEXT: v_mov_b32_e32 v29, s20
-; GFX11-NEXT: v_mov_b32_e32 v31, s13
+; GFX11-NEXT: v_dual_mov_b32 v1, s45 :: v_dual_mov_b32 v2, s43
+; GFX11-NEXT: v_dual_mov_b32 v5, s44 :: v_dual_mov_b32 v6, s42
+; GFX11-NEXT: v_dual_mov_b32 v7, s41 :: v_dual_mov_b32 v10, s28
+; GFX11-NEXT: v_dual_mov_b32 v9, s40 :: v_dual_mov_b32 v14, s27
+; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v18, s23
+; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v22, s22
+; GFX11-NEXT: v_dual_mov_b32 v17, s25 :: v_dual_mov_b32 v26, s14
+; GFX11-NEXT: v_dual_mov_b32 v21, s24 :: v_dual_mov_b32 v30, s13
+; GFX11-NEXT: v_mov_b32_e32 v23, s21
+; GFX11-NEXT: v_mov_b32_e32 v25, s20
+; GFX11-NEXT: v_mov_b32_e32 v29, s15
+; GFX11-NEXT: v_mov_b32_e32 v31, s12
; GFX11-NEXT: v_mov_b32_e32 v27, s10
; GFX11-NEXT: v_mov_b32_e32 v19, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
@@ -36991,6 +37407,7 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s46, v17
; SI-NEXT: v_readfirstlane_b32 s47, v16
; SI-NEXT: v_readfirstlane_b32 s44, v15
@@ -37005,11 +37422,11 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s13, v6
; SI-NEXT: v_readfirstlane_b32 s10, v5
; SI-NEXT: v_readfirstlane_b32 s11, v4
-; SI-NEXT: v_readfirstlane_b32 s7, v3
+; SI-NEXT: v_readfirstlane_b32 s8, v3
; SI-NEXT: v_readfirstlane_b32 s9, v2
; SI-NEXT: v_readfirstlane_b32 s6, v1
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s8, v0
+; SI-NEXT: v_readfirstlane_b32 s7, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB107_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -37040,12 +37457,12 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
-; SI-NEXT: s_and_b32 s4, s8, 0xff
+; SI-NEXT: s_and_b32 s4, s7, 0xff
; SI-NEXT: s_lshl_b32 s5, s6, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
; SI-NEXT: s_and_b32 s4, s9, 0xff
-; SI-NEXT: s_lshl_b32 s5, s7, 8
+; SI-NEXT: s_lshl_b32 s5, s8, 8
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
; SI-NEXT: s_and_b32 s4, s11, 0xff
@@ -37079,26 +37496,26 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: s_cbranch_execnz .LBB107_3
; SI-NEXT: .LBB107_2: ; %cmp.true
; SI-NEXT: s_add_i32 s9, s9, 3
-; SI-NEXT: s_add_i32 s8, s8, 3
+; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_and_b32 s9, s9, 0xff
-; SI-NEXT: s_lshl_b32 s7, s7, 8
-; SI-NEXT: s_and_b32 s8, s8, 0xff
+; SI-NEXT: s_lshl_b32 s8, s8, 8
+; SI-NEXT: s_and_b32 s7, s7, 0xff
; SI-NEXT: s_lshl_b32 s6, s6, 8
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_add_i32 s13, s13, 3
; SI-NEXT: s_and_b32 s11, s11, 0xff
; SI-NEXT: s_lshl_b32 s10, s10, 8
-; SI-NEXT: s_or_b32 s7, s7, s9
-; SI-NEXT: s_or_b32 s6, s6, s8
-; SI-NEXT: s_and_b32 s8, s28, 0xff
+; SI-NEXT: s_or_b32 s8, s8, s9
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_and_b32 s7, s28, 0xff
; SI-NEXT: s_lshl_b32 s9, s29, 8
; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: s_add_i32 s15, s15, 3
; SI-NEXT: s_and_b32 s13, s13, 0xff
; SI-NEXT: s_lshl_b32 s12, s12, 8
; SI-NEXT: s_or_b32 s10, s10, s11
-; SI-NEXT: s_or_b32 s8, s9, s8
+; SI-NEXT: s_or_b32 s7, s9, s7
; SI-NEXT: s_and_b32 s9, s26, 0xff
; SI-NEXT: s_lshl_b32 s11, s27, 8
; SI-NEXT: s_add_i32 s24, s24, 3
@@ -37149,9 +37566,9 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: s_addk_i32 s14, 0x300
; SI-NEXT: s_addk_i32 s12, 0x300
; SI-NEXT: s_addk_i32 s10, 0x300
-; SI-NEXT: s_addk_i32 s7, 0x300
-; SI-NEXT: s_addk_i32 s6, 0x300
; SI-NEXT: s_addk_i32 s8, 0x300
+; SI-NEXT: s_addk_i32 s6, 0x300
+; SI-NEXT: s_addk_i32 s7, 0x300
; SI-NEXT: s_addk_i32 s9, 0x300
; SI-NEXT: s_addk_i32 s11, 0x300
; SI-NEXT: s_addk_i32 s13, 0x300
@@ -37164,9 +37581,9 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: v_cvt_f32_f16_e32 v3, s13
; SI-NEXT: v_cvt_f32_f16_e32 v4, s11
; SI-NEXT: v_cvt_f32_f16_e32 v5, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
; SI-NEXT: v_cvt_f32_f16_e32 v7, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
; SI-NEXT: v_cvt_f32_f16_e32 v9, s10
; SI-NEXT: v_cvt_f32_f16_e32 v10, s12
; SI-NEXT: v_cvt_f32_f16_e32 v11, s14
@@ -37193,17 +37610,20 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB107_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB107_2
+; SI-NEXT: s_branch .LBB107_3
;
; VI-LABEL: bitcast_v32i8_to_v16f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v6
; VI-NEXT: v_mov_b32_e32 v20, v4
; VI-NEXT: v_mov_b32_e32 v22, v2
; VI-NEXT: v_mov_b32_e32 v19, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v5
@@ -37349,17 +37769,20 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB107_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB107_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB107_2
+; VI-NEXT: s_branch .LBB107_3
;
; GFX9-LABEL: bitcast_v32i8_to_v16f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v22, v4
; GFX9-NEXT: v_mov_b32_e32 v21, v2
; GFX9-NEXT: v_mov_b32_e32 v19, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v5
@@ -37503,7 +37926,9 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB107_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB107_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB107_2
+; GFX9-NEXT: s_branch .LBB107_3
;
; GFX11-LABEL: bitcast_v32i8_to_v16f16_scalar:
; GFX11: ; %bb.0:
@@ -37518,42 +37943,42 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB107_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
+; GFX11-NEXT: s_or_b32 s8, s8, s9
; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
@@ -37562,25 +37987,24 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s9
; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: v_mov_b32_e32 v1, s5
; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-NEXT: v_mov_b32_e32 v2, s6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB107_3
; GFX11-NEXT: .LBB107_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s28, s28, 3
; GFX11-NEXT: s_lshl_b32 s5, s29, 8
@@ -37675,7 +38099,9 @@ define inreg <16 x half> @bitcast_v32i8_to_v16f16_scalar(<32 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB107_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB107_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB107_2
+; GFX11-NEXT: s_branch .LBB107_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -38887,6 +39313,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v55, 1.0, s28
; SI-NEXT: v_mul_f32_e32 v52, 1.0, v1
; SI-NEXT: v_mul_f32_e32 v53, 1.0, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB109_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v38
@@ -39030,12 +39457,15 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB109_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB109_2
+; SI-NEXT: s_branch .LBB109_3
;
; VI-LABEL: bitcast_v16bf16_to_v32i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[12:13], -1
; VI-NEXT: s_cbranch_scc0 .LBB109_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s14, s23, 24
@@ -39259,7 +39689,8 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; VI-NEXT: ; implicit-def: $sgpr25
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB109_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; VI-NEXT: s_cbranch_vccz .LBB109_2
; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -39308,6 +39739,7 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[12:13], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s42, s23, 24
@@ -39543,7 +39975,8 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX9-NEXT: ; implicit-def: $sgpr45
; GFX9-NEXT: ; implicit-def: $sgpr59
; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB109_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GFX9-NEXT: s_cbranch_vccz .LBB109_2
; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v24, s22
; GFX9-NEXT: v_mov_b32_e32 v32, s23
@@ -39588,35 +40021,34 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s12, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s27, s19, 24
-; GFX11-NEXT: s_lshr_b32 s46, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s19, 8
-; GFX11-NEXT: s_lshr_b32 s42, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s18, 8
-; GFX11-NEXT: s_lshr_b32 s23, s17, 24
-; GFX11-NEXT: s_lshr_b32 s45, s17, 16
-; GFX11-NEXT: s_lshr_b32 s26, s17, 8
-; GFX11-NEXT: s_lshr_b32 s29, s16, 16
-; GFX11-NEXT: s_lshr_b32 s28, s16, 8
-; GFX11-NEXT: s_lshr_b32 s15, s3, 24
-; GFX11-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-NEXT: s_lshr_b32 s22, s3, 8
-; GFX11-NEXT: s_lshr_b32 s25, s2, 16
-; GFX11-NEXT: s_lshr_b32 s24, s2, 8
-; GFX11-NEXT: s_lshr_b32 s13, s1, 24
-; GFX11-NEXT: s_lshr_b32 s43, s1, 16
-; GFX11-NEXT: s_lshr_b32 s14, s1, 8
-; GFX11-NEXT: s_lshr_b32 s21, s0, 16
-; GFX11-NEXT: s_lshr_b32 s20, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s26, s19, 24
+; GFX11-NEXT: s_lshr_b32 s45, s19, 16
+; GFX11-NEXT: s_lshr_b32 s29, s19, 8
+; GFX11-NEXT: s_lshr_b32 s41, s18, 16
+; GFX11-NEXT: s_lshr_b32 s40, s18, 8
+; GFX11-NEXT: s_lshr_b32 s22, s17, 24
+; GFX11-NEXT: s_lshr_b32 s44, s17, 16
+; GFX11-NEXT: s_lshr_b32 s25, s17, 8
+; GFX11-NEXT: s_lshr_b32 s28, s16, 16
+; GFX11-NEXT: s_lshr_b32 s27, s16, 8
+; GFX11-NEXT: s_lshr_b32 s14, s3, 24
+; GFX11-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-NEXT: s_lshr_b32 s21, s3, 8
+; GFX11-NEXT: s_lshr_b32 s24, s2, 16
+; GFX11-NEXT: s_lshr_b32 s23, s2, 8
+; GFX11-NEXT: s_lshr_b32 s12, s1, 24
+; GFX11-NEXT: s_lshr_b32 s42, s1, 16
+; GFX11-NEXT: s_lshr_b32 s13, s1, 8
+; GFX11-NEXT: s_lshr_b32 s20, s0, 16
+; GFX11-NEXT: s_lshr_b32 s15, s0, 8
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s12
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-NEXT: s_cbranch_execnz .LBB109_4
; GFX11-NEXT: .LBB109_2: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s4, s1, 16
; GFX11-NEXT: s_and_b32 s1, s1, 0xffff0000
@@ -39809,46 +40241,47 @@ define inreg <32 x i8> @bitcast_v16bf16_to_v32i8_scalar(<16 x bfloat> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX11-NEXT: s_branch .LBB109_5
; GFX11-NEXT: .LBB109_3:
+; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr20
-; GFX11-NEXT: ; implicit-def: $sgpr21
; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr13
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr23
; GFX11-NEXT: ; implicit-def: $sgpr24
-; GFX11-NEXT: ; implicit-def: $sgpr25
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: ; implicit-def: $sgpr22
-; GFX11-NEXT: ; implicit-def: $sgpr44
-; GFX11-NEXT: ; implicit-def: $sgpr15
+; GFX11-NEXT: ; implicit-def: $sgpr21
+; GFX11-NEXT: ; implicit-def: $sgpr43
+; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: ; implicit-def: $sgpr27
; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr29
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr26
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr23
+; GFX11-NEXT: ; implicit-def: $sgpr25
+; GFX11-NEXT: ; implicit-def: $sgpr44
+; GFX11-NEXT: ; implicit-def: $sgpr22
+; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr41
-; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr27
-; GFX11-NEXT: s_branch .LBB109_2
+; GFX11-NEXT: ; implicit-def: $sgpr29
+; GFX11-NEXT: ; implicit-def: $sgpr45
+; GFX11-NEXT: ; implicit-def: $sgpr26
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB109_2
; GFX11-NEXT: .LBB109_4:
; GFX11-NEXT: v_dual_mov_b32 v24, s18 :: v_dual_mov_b32 v33, s17
; GFX11-NEXT: v_dual_mov_b32 v32, s19 :: v_dual_mov_b32 v35, s1
-; GFX11-NEXT: v_dual_mov_b32 v30, s46 :: v_dual_mov_b32 v25, s41
-; GFX11-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s27
-; GFX11-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v29, s40
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s28
-; GFX11-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s44 :: v_dual_mov_b32 v21, s26
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s24
-; GFX11-NEXT: v_dual_mov_b32 v6, s43 :: v_dual_mov_b32 v15, s15
-; GFX11-NEXT: v_dual_mov_b32 v26, s42 :: v_dual_mov_b32 v13, s22
-; GFX11-NEXT: v_dual_mov_b32 v18, s29 :: v_dual_mov_b32 v1, s20
-; GFX11-NEXT: v_dual_mov_b32 v10, s25 :: v_dual_mov_b32 v7, s13
-; GFX11-NEXT: v_dual_mov_b32 v2, s21 :: v_dual_mov_b32 v5, s14
+; GFX11-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v25, s40
+; GFX11-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v31, s26
+; GFX11-NEXT: v_dual_mov_b32 v22, s44 :: v_dual_mov_b32 v29, s29
+; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v34, s3 :: v_dual_mov_b32 v23, s22
+; GFX11-NEXT: v_dual_mov_b32 v14, s43 :: v_dual_mov_b32 v21, s25
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v9, s23
+; GFX11-NEXT: v_dual_mov_b32 v6, s42 :: v_dual_mov_b32 v15, s14
+; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v13, s21
+; GFX11-NEXT: v_dual_mov_b32 v18, s28 :: v_dual_mov_b32 v1, s15
+; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v7, s12
+; GFX11-NEXT: v_dual_mov_b32 v2, s20 :: v_dual_mov_b32 v5, s13
; GFX11-NEXT: v_mov_b32_e32 v27, s10
; GFX11-NEXT: v_mov_b32_e32 v19, s8
; GFX11-NEXT: v_mov_b32_e32 v11, s6
@@ -40836,13 +41269,14 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s42, v15
; SI-NEXT: v_readfirstlane_b32 s43, v14
; SI-NEXT: v_readfirstlane_b32 s40, v7
; SI-NEXT: v_readfirstlane_b32 s41, v6
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: v_readfirstlane_b32 s9, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v5
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v9
; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v13
@@ -41045,7 +41479,8 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $sgpr45
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: s_branch .LBB111_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB111_2
; SI-NEXT: .LBB111_4:
; SI-NEXT: v_mov_b32_e32 v10, s44
; SI-NEXT: v_mov_b32_e32 v14, s45
@@ -41067,11 +41502,12 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v6
; VI-NEXT: v_mov_b32_e32 v20, v4
; VI-NEXT: v_mov_b32_e32 v22, v2
; VI-NEXT: v_mov_b32_e32 v19, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v18, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v24, 8, v5
@@ -41217,17 +41653,20 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB111_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; VI-NEXT: s_branch .LBB111_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB111_2
+; VI-NEXT: s_branch .LBB111_3
;
; GFX9-LABEL: bitcast_v32i8_to_v16bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v22, v4
; GFX9-NEXT: v_mov_b32_e32 v21, v2
; GFX9-NEXT: v_mov_b32_e32 v19, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v18, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v24, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v5
@@ -41371,7 +41810,9 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB111_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX9-NEXT: s_branch .LBB111_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB111_2
+; GFX9-NEXT: s_branch .LBB111_3
;
; GFX11-LABEL: bitcast_v32i8_to_v16bf16_scalar:
; GFX11: ; %bb.0:
@@ -41386,42 +41827,42 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-NEXT: v_lshlrev_b32_e32 v9, 8, v9
; GFX11-NEXT: v_lshlrev_b32_e32 v11, 8, v11
; GFX11-NEXT: v_lshlrev_b32_e32 v13, 8, v13
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB111_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v15
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-NEXT: v_and_b32_e32 v4, 0xff, v16
-; GFX11-NEXT: s_or_b32 s9, s9, s10
+; GFX11-NEXT: s_or_b32 s8, s8, s9
; GFX11-NEXT: v_or_b32_e32 v1, v1, v20
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v18
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v17
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v8
@@ -41430,25 +41871,24 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-NEXT: v_or_b32_e32 v2, v2, v19
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-NEXT: v_or_b32_e32 v6, v6, v9
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v4
-; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s10
+; GFX11-NEXT: v_and_b32_e64 v3, 0xffff, s9
; GFX11-NEXT: v_or_b32_e32 v7, v7, v13
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v5
; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_mov_b32_e32 v1, s6
+; GFX11-NEXT: v_mov_b32_e32 v1, s5
; GFX11-NEXT: v_or_b32_e32 v0, v0, v14
; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v22
; GFX11-NEXT: v_lshl_or_b32 v7, v7, 16, v23
-; GFX11-NEXT: v_mov_b32_e32 v2, s7
+; GFX11-NEXT: v_mov_b32_e32 v2, s6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v3
-; GFX11-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v3, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v3, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB111_3
; GFX11-NEXT: .LBB111_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s28, s28, 3
; GFX11-NEXT: s_lshl_b32 s5, s29, 8
@@ -41543,7 +41983,9 @@ define inreg <16 x bfloat> @bitcast_v32i8_to_v16bf16_scalar(<32 x i8> inreg %a,
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB111_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7
-; GFX11-NEXT: s_branch .LBB111_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB111_2
+; GFX11-NEXT: s_branch .LBB111_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll
index 6cf53d1..6ea0f24 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.288bit.ll
@@ -114,10 +114,14 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s24, s24, 3
; SI-NEXT: s_add_i32 s23, s23, 3
; SI-NEXT: s_add_i32 s22, s22, 3
@@ -127,7 +131,7 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -138,17 +142,19 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v9i32_to_v9f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
@@ -158,7 +164,7 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -169,17 +175,19 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v9i32_to_v9f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
@@ -189,7 +197,7 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -200,19 +208,20 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v9i32_to_v9f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
@@ -222,7 +231,7 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -230,8 +239,6 @@ define inreg <9 x float> @bitcast_v9i32_to_v9f32_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_mov_b32_e32 v8, s20
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -352,10 +359,14 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
; SI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -366,8 +377,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -384,10 +393,14 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -398,8 +411,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -416,10 +427,14 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -430,8 +445,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -452,12 +465,15 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s18, 1.0
@@ -468,8 +484,6 @@ define inreg <9 x i32> @bitcast_v9f32_to_v9i32_scalar(<9 x float> inreg %a, i32
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -645,6 +659,7 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s24
@@ -711,16 +726,22 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_v9i32_to_v18i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
@@ -730,7 +751,7 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -741,17 +762,19 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v9i32_to_v18i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
@@ -761,7 +784,7 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -772,19 +795,20 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v9i32_to_v18i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
@@ -794,7 +818,7 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -802,8 +826,6 @@ define inreg <18 x i16> @bitcast_v9i32_to_v18i16_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_mov_b32_e32 v8, s20
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1036,9 +1058,10 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v9, v2
; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v3
; SI-NEXT: s_cbranch_scc0 .LBB7_4
@@ -1131,16 +1154,22 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v18i16_to_v9i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s24, 3
; VI-NEXT: s_and_b32 s4, s24, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -1186,7 +1215,7 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1197,17 +1226,19 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v18i16_to_v9i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -1218,8 +1249,6 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1240,12 +1269,15 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s18, 3 op_sel_hi:[1,0]
@@ -1256,8 +1288,6 @@ define inreg <9 x i32> @bitcast_v18i16_to_v9i32_scalar(<18 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -1488,6 +1518,7 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s24, 16
@@ -1576,16 +1607,22 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr15
; SI-NEXT: ; implicit-def: $vgpr16
; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_v9i32_to_v18f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
; VI-NEXT: s_add_i32 s22, s22, 3
@@ -1595,7 +1632,7 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1606,17 +1643,19 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v9i32_to_v18f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
; GFX9-NEXT: s_add_i32 s22, s22, 3
@@ -1626,7 +1665,7 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1637,19 +1676,20 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v9i32_to_v18f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
; GFX11-NEXT: s_add_i32 s18, s18, 3
@@ -1659,7 +1699,7 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1667,8 +1707,6 @@ define inreg <18 x half> @bitcast_v9i32_to_v18f16_scalar(<9 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_mov_b32_e32 v8, s20
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1961,6 +1999,7 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v9, v2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v26
@@ -2059,16 +2098,22 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v18f16_to_v9i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s24, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2116,8 +2161,6 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v9
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2134,10 +2177,14 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -2149,8 +2196,6 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2171,12 +2216,15 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s18 op_sel_hi:[0,1]
@@ -2187,8 +2235,6 @@ define inreg <9 x i32> @bitcast_v18f16_to_v9i32_scalar(<18 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -2359,6 +2405,7 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s24
@@ -2406,7 +2453,8 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr13
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -2427,10 +2475,14 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -2441,8 +2493,6 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2466,10 +2516,14 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -2480,8 +2534,6 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2509,12 +2561,15 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s18, 1.0
@@ -2525,8 +2580,6 @@ define inreg <18 x i16> @bitcast_v9f32_to_v18i16_scalar(<9 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -2769,9 +2822,10 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v9, v2
; SI-NEXT: v_mov_b32_e32 v10, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v3
; SI-NEXT: s_cbranch_scc0 .LBB15_4
@@ -2864,16 +2918,22 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v18i16_to_v9f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s24, 3
; VI-NEXT: s_and_b32 s4, s24, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -2919,7 +2979,7 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2930,17 +2990,19 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v18i16_to_v9f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -2951,8 +3013,6 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2973,12 +3033,15 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s18, 3 op_sel_hi:[1,0]
@@ -2989,8 +3052,6 @@ define inreg <9 x float> @bitcast_v18i16_to_v9f32_scalar(<18 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3216,6 +3277,7 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s25, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s24, 16
@@ -3304,16 +3366,22 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr15
; SI-NEXT: ; implicit-def: $vgpr16
; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v9f32_to_v18f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
; VI-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -3324,8 +3392,6 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3349,10 +3415,14 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
; GFX9-NEXT: v_add_f32_e64 v6, s22, 1.0
@@ -3363,8 +3433,6 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3392,12 +3460,15 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
; GFX11-NEXT: v_add_f32_e64 v6, s18, 1.0
@@ -3408,8 +3479,6 @@ define inreg <18 x half> @bitcast_v9f32_to_v18f16_scalar(<9 x float> inreg %a, i
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3712,6 +3781,7 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v9, v2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v26
@@ -3810,16 +3880,22 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v18f16_to_v9f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s24, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -3867,8 +3943,6 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v9
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3885,10 +3959,14 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -3900,8 +3978,6 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3922,12 +3998,15 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s18 op_sel_hi:[0,1]
@@ -3938,8 +4017,6 @@ define inreg <9 x float> @bitcast_v18f16_to_v9f32_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4199,11 +4276,12 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v18, v3
; SI-NEXT: v_mov_b32_e32 v21, v2
; SI-NEXT: v_mov_b32_e32 v20, v1
; SI-NEXT: v_mov_b32_e32 v19, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -4283,16 +4361,22 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr15
; SI-NEXT: ; implicit-def: $vgpr16
; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v18i16_to_v18f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
; VI-NEXT: s_add_i32 s7, s17, 3
@@ -4338,7 +4422,7 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4349,17 +4433,19 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v18i16_to_v18f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v6, s22, 3 op_sel_hi:[1,0]
@@ -4370,8 +4456,6 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4399,12 +4483,15 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v6, s18, 3 op_sel_hi:[1,0]
@@ -4415,8 +4502,6 @@ define inreg <18 x half> @bitcast_v18i16_to_v18f16_scalar(<18 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4684,10 +4769,14 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v17, v17
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
@@ -4764,19 +4853,21 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v5, v6, v5, 16
; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v18f16_to_v18i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s25, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s23, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v2, s5
@@ -4824,8 +4915,6 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v0, v1
; VI-NEXT: v_or_b32_e32 v0, v9, v10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4849,10 +4938,14 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s25, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v7, s23, v0 op_sel_hi:[1,0]
@@ -4864,8 +4957,6 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4893,12 +4984,15 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s21, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s18 op_sel_hi:[0,1]
@@ -4909,8 +5003,6 @@ define inreg <18 x i16> @bitcast_v18f16_to_v18i16_scalar(<18 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
index 0cefbc1..989e0b9 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.320bit.ll
@@ -121,10 +121,14 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s25, s25, 3
; SI-NEXT: s_add_i32 s24, s24, 3
; SI-NEXT: s_add_i32 s23, s23, 3
@@ -135,7 +139,7 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -147,17 +151,19 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v10i32_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
@@ -168,7 +174,7 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -180,17 +186,19 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v10i32_to_v10f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
@@ -201,7 +209,7 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -213,19 +221,20 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v10i32_to_v10f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
@@ -236,7 +245,7 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -244,8 +253,6 @@ define inreg <10 x float> @bitcast_v10i32_to_v10f32_scalar(<10 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -372,10 +379,14 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -387,8 +398,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -406,10 +415,14 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -421,8 +434,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -440,10 +451,14 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -455,8 +470,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -478,12 +491,15 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -495,8 +511,6 @@ define inreg <10 x i32> @bitcast_v10f32_to_v10i32_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -679,6 +693,7 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s24
@@ -751,16 +766,22 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_v10i32_to_v20i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
@@ -771,7 +792,7 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -783,17 +804,19 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v10i32_to_v20i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
@@ -804,7 +827,7 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -816,19 +839,20 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v10i32_to_v20i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
@@ -839,7 +863,7 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -847,8 +871,6 @@ define inreg <20 x i16> @bitcast_v10i32_to_v20i16_scalar(<10 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1096,10 +1118,11 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v10, v4
; SI-NEXT: v_mov_b32_e32 v11, v2
; SI-NEXT: v_mov_b32_e32 v12, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v5
@@ -1199,16 +1222,22 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v20i16_to_v10i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s25, 3
; VI-NEXT: s_and_b32 s4, s25, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -1259,7 +1288,7 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1271,17 +1300,19 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v20i16_to_v10i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -1293,8 +1324,6 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1316,12 +1345,15 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -1333,8 +1365,6 @@ define inreg <10 x i32> @bitcast_v20i16_to_v10i32_scalar(<20 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -1581,6 +1611,7 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s25, 16
@@ -1678,16 +1709,22 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_v10i32_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
@@ -1698,7 +1735,7 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1710,17 +1747,19 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v10i32_to_v20f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
@@ -1731,7 +1770,7 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1743,19 +1782,20 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v10i32_to_v20f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
@@ -1766,7 +1806,7 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1774,8 +1814,6 @@ define inreg <20 x half> @bitcast_v10i32_to_v20f16_scalar(<10 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2089,6 +2127,7 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v10, v4
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v29
@@ -2197,16 +2236,22 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v20f16_to_v10i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2259,8 +2304,6 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2278,10 +2321,14 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -2294,8 +2341,6 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2317,12 +2362,15 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -2334,8 +2382,6 @@ define inreg <10 x i32> @bitcast_v20f16_to_v10i32_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3381,6 +3427,7 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v3, s24
@@ -3618,12 +3665,15 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v10i32_to_v40i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
+; VI-NEXT: s_mov_b64 s[14:15], -1
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
@@ -3851,12 +3901,15 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr28
; VI-NEXT: ; implicit-def: $sgpr27
; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v10i32_to_v40i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_mov_b64 s[14:15], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
@@ -4075,15 +4128,18 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr28
; GFX9-NEXT: ; implicit-def: $sgpr27
; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v10i32_to_v40i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
-; GFX11-NEXT: s_mov_b32 s63, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[20:21], 24
; GFX11-NEXT: s_lshr_b32 s14, s21, 24
; GFX11-NEXT: s_lshr_b32 s15, s21, 16
; GFX11-NEXT: s_lshr_b32 s22, s21, 8
@@ -4109,13 +4165,11 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s60, s1, 8
; GFX11-NEXT: s_lshr_b32 s61, s0, 16
; GFX11-NEXT: s_lshr_b32 s62, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[12:13], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s63
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
@@ -4290,7 +4344,9 @@ define inreg <40 x i8> @bitcast_v10i32_to_v40i8_scalar(<10 x i32> inreg %a, i32
; GFX11-NEXT: ; implicit-def: $sgpr22
; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5640,12 +5696,13 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v8
; SI-NEXT: v_mov_b32_e32 v30, v6
; SI-NEXT: v_mov_b32_e32 v29, v4
; SI-NEXT: v_mov_b32_e32 v28, v2
; SI-NEXT: v_mov_b32_e32 v27, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v39, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v5
@@ -5867,18 +5924,21 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v40i8_to_v10i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v8
; VI-NEXT: v_mov_b32_e32 v30, v6
; VI-NEXT: v_mov_b32_e32 v29, v4
; VI-NEXT: v_mov_b32_e32 v28, v2
; VI-NEXT: v_mov_b32_e32 v27, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v37, 8, v5
@@ -6048,18 +6108,21 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v40i8_to_v10i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v8
; GFX9-NEXT: v_mov_b32_e32 v30, v6
; GFX9-NEXT: v_mov_b32_e32 v29, v4
; GFX9-NEXT: v_mov_b32_e32 v28, v2
; GFX9-NEXT: v_mov_b32_e32 v27, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v37, 8, v5
@@ -6230,7 +6293,9 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-LABEL: bitcast_v40i8_to_v10i32_scalar:
; GFX11: ; %bb.0:
@@ -6249,64 +6314,64 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v26
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v27
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_or_b32_e32 v0, v0, v32
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
; GFX11-NEXT: v_or_b32_e32 v2, v2, v29
; GFX11-NEXT: v_or_b32_e32 v3, v3, v30
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v14
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-NEXT: v_or_b32_e32 v5, v5, v31
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_or_b32_e32 v6, v6, v13
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v25
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v12
; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v16
@@ -6314,8 +6379,8 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v20
; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v6
; GFX11-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-NEXT: v_mov_b32_e32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v24
; GFX11-NEXT: v_or_b32_e32 v1, v1, v28
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
@@ -6332,11 +6397,10 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v7, v7, v33
; GFX11-NEXT: v_or_b32_e32 v8, v34, v8
; GFX11-NEXT: v_or_b32_e32 v5, v0, v1
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: v_or_b32_e32 v9, v9, v21
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-NEXT: .LBB15_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -6464,7 +6528,9 @@ define inreg <10 x i32> @bitcast_v40i8_to_v10i32_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-NEXT: s_branch .LBB15_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6597,10 +6663,14 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB17_3
-; SI-NEXT: .LBB17_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB17_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB17_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s25, s25, 3
; SI-NEXT: s_add_i32 s24, s24, 3
; SI-NEXT: s_add_i32 s23, s23, 3
@@ -6611,7 +6681,7 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB17_3: ; %end
+; SI-NEXT: .LBB17_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -6623,17 +6693,19 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_v10i32_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
@@ -6644,7 +6716,7 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6656,17 +6728,19 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v10i32_to_v5f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
@@ -6677,7 +6751,7 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -6689,19 +6763,20 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v10i32_to_v5f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
@@ -6712,7 +6787,7 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6720,8 +6795,6 @@ define inreg <5 x double> @bitcast_v10i32_to_v5f64_scalar(<10 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6834,18 +6907,20 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB19_4
-; SI-NEXT: .LBB19_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB19_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB19_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB19_3:
-; SI-NEXT: s_branch .LBB19_2
; SI-NEXT: .LBB19_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -6863,18 +6938,20 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6892,18 +6969,20 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6925,20 +7004,21 @@ define inreg <10 x i32> @bitcast_v5f64_to_v10i32_scalar(<5 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -7078,10 +7158,14 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s25, s25, 3
; SI-NEXT: s_add_i32 s24, s24, 3
; SI-NEXT: s_add_i32 s23, s23, 3
@@ -7092,7 +7176,7 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7104,17 +7188,19 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v10i32_to_v5i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
; VI-NEXT: s_add_i32 s23, s23, 3
@@ -7125,7 +7211,7 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7137,17 +7223,19 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v10i32_to_v5i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
; GFX9-NEXT: s_add_i32 s23, s23, 3
@@ -7158,7 +7246,7 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7170,19 +7258,20 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v10i32_to_v5i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
; GFX11-NEXT: s_add_i32 s19, s19, 3
@@ -7193,7 +7282,7 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB21_3: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7201,8 +7290,6 @@ define inreg <5 x i64> @bitcast_v10i32_to_v5i64_scalar(<10 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7338,10 +7425,14 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s24, s24, 3
; SI-NEXT: s_addc_u32 s25, s25, 0
; SI-NEXT: s_add_u32 s22, s22, 3
@@ -7352,7 +7443,7 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7364,17 +7455,19 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v5i64_to_v10i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s24, s24, 3
; VI-NEXT: s_addc_u32 s25, s25, 0
; VI-NEXT: s_add_u32 s22, s22, 3
@@ -7385,7 +7478,7 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7397,17 +7490,19 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v5i64_to_v10i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s24, s24, 3
; GFX9-NEXT: s_addc_u32 s25, s25, 0
; GFX9-NEXT: s_add_u32 s22, s22, 3
@@ -7418,7 +7513,7 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7430,19 +7525,20 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v5i64_to_v10i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s20, s20, 3
; GFX11-NEXT: s_addc_u32 s21, s21, 0
; GFX11-NEXT: s_add_u32 s18, s18, 3
@@ -7453,7 +7549,7 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB23_3: ; %end
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7461,8 +7557,6 @@ define inreg <10 x i32> @bitcast_v5i64_to_v10i32_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7632,6 +7726,7 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s24
@@ -7683,7 +7778,8 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -7706,10 +7802,14 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -7721,8 +7821,6 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7746,10 +7844,14 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -7761,8 +7863,6 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7790,12 +7890,15 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
-; GFX11-NEXT: .LBB25_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -7807,8 +7910,6 @@ define inreg <20 x i16> @bitcast_v10f32_to_v20i16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -8066,10 +8167,11 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v10, v4
; SI-NEXT: v_mov_b32_e32 v11, v2
; SI-NEXT: v_mov_b32_e32 v12, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v5
@@ -8169,16 +8271,22 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; SI-NEXT: s_branch .LBB27_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB27_2
+; SI-NEXT: s_branch .LBB27_3
;
; VI-LABEL: bitcast_v20i16_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s25, 3
; VI-NEXT: s_and_b32 s4, s25, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -8229,7 +8337,7 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8241,17 +8349,19 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v20i16_to_v10f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -8263,8 +8373,6 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8286,12 +8394,15 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -8303,8 +8414,6 @@ define inreg <10 x float> @bitcast_v20i16_to_v10f32_scalar(<20 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -8545,6 +8654,7 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB29_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s25, 16
@@ -8642,16 +8752,22 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v10f32_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -8663,8 +8779,6 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8688,10 +8802,14 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -8703,8 +8821,6 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8732,12 +8848,15 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -8749,8 +8868,6 @@ define inreg <20 x half> @bitcast_v10f32_to_v20f16_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -9074,6 +9191,7 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v10, v4
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB31_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v29
@@ -9182,16 +9300,22 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v20f16_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_4
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -9244,8 +9368,6 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_3:
-; VI-NEXT: s_branch .LBB31_2
; VI-NEXT: .LBB31_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -9263,10 +9385,14 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -9279,8 +9405,6 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9302,12 +9426,15 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -9319,8 +9446,6 @@ define inreg <10 x float> @bitcast_v20f16_to_v10f32_scalar(<20 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -10358,6 +10483,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB33_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v3, s24
@@ -10469,7 +10595,8 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr40
; SI-NEXT: ; implicit-def: $sgpr29
; SI-NEXT: ; implicit-def: $sgpr28
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v34, s16
; SI-NEXT: v_mov_b32_e32 v31, s17
@@ -10622,6 +10749,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
+; VI-NEXT: s_mov_b64 s[14:15], -1
; VI-NEXT: s_cbranch_scc0 .LBB33_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
@@ -10728,7 +10856,8 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr29
; VI-NEXT: ; implicit-def: $sgpr27
; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v9, s16
; VI-NEXT: v_mov_b32_e32 v10, s17
@@ -10847,6 +10976,7 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_mov_b64 s[14:15], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
@@ -10953,7 +11083,8 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr29
; GFX9-NEXT: ; implicit-def: $sgpr27
; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v10, s17
@@ -11063,41 +11194,40 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
-; GFX11-NEXT: s_mov_b32 s14, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s15, s21, 24
-; GFX11-NEXT: s_lshr_b32 s22, s21, 16
-; GFX11-NEXT: s_lshr_b32 s24, s21, 8
-; GFX11-NEXT: s_lshr_b32 s23, s20, 16
-; GFX11-NEXT: s_lshr_b32 s25, s20, 8
-; GFX11-NEXT: s_lshr_b32 s26, s19, 24
-; GFX11-NEXT: s_lshr_b32 s27, s19, 16
-; GFX11-NEXT: s_lshr_b32 s29, s19, 8
-; GFX11-NEXT: s_lshr_b32 s28, s18, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 8
-; GFX11-NEXT: s_lshr_b32 s41, s17, 24
-; GFX11-NEXT: s_lshr_b32 s42, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s17, 8
-; GFX11-NEXT: s_lshr_b32 s43, s16, 16
-; GFX11-NEXT: s_lshr_b32 s45, s16, 8
-; GFX11-NEXT: s_lshr_b32 s46, s3, 24
-; GFX11-NEXT: s_lshr_b32 s47, s3, 16
-; GFX11-NEXT: s_lshr_b32 s57, s3, 8
-; GFX11-NEXT: s_lshr_b32 s56, s2, 16
-; GFX11-NEXT: s_lshr_b32 s58, s2, 8
-; GFX11-NEXT: s_lshr_b32 s59, s1, 24
-; GFX11-NEXT: s_lshr_b32 s60, s1, 16
-; GFX11-NEXT: s_lshr_b32 s62, s1, 8
-; GFX11-NEXT: s_lshr_b32 s61, s0, 16
-; GFX11-NEXT: s_lshr_b32 s63, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s14, s21, 24
+; GFX11-NEXT: s_lshr_b32 s15, s21, 16
+; GFX11-NEXT: s_lshr_b32 s23, s21, 8
+; GFX11-NEXT: s_lshr_b32 s22, s20, 16
+; GFX11-NEXT: s_lshr_b32 s24, s20, 8
+; GFX11-NEXT: s_lshr_b32 s25, s19, 24
+; GFX11-NEXT: s_lshr_b32 s26, s19, 16
+; GFX11-NEXT: s_lshr_b32 s28, s19, 8
+; GFX11-NEXT: s_lshr_b32 s27, s18, 16
+; GFX11-NEXT: s_lshr_b32 s29, s18, 8
+; GFX11-NEXT: s_lshr_b32 s40, s17, 24
+; GFX11-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-NEXT: s_lshr_b32 s43, s17, 8
+; GFX11-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-NEXT: s_lshr_b32 s44, s16, 8
+; GFX11-NEXT: s_lshr_b32 s45, s3, 24
+; GFX11-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-NEXT: s_lshr_b32 s56, s3, 8
+; GFX11-NEXT: s_lshr_b32 s47, s2, 16
+; GFX11-NEXT: s_lshr_b32 s57, s2, 8
+; GFX11-NEXT: s_lshr_b32 s58, s1, 24
+; GFX11-NEXT: s_lshr_b32 s59, s1, 16
+; GFX11-NEXT: s_lshr_b32 s61, s1, 8
+; GFX11-NEXT: s_lshr_b32 s60, s0, 16
+; GFX11-NEXT: s_lshr_b32 s62, s0, 8
; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_cbranch_execnz .LBB33_4
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v6, s17, 1.0
; GFX11-NEXT: v_add_f32_e64 v5, s16, 1.0
@@ -11141,57 +11271,58 @@ define inreg <40 x i8> @bitcast_v10f32_to_v40i8_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 8, v13
; GFX11-NEXT: s_branch .LBB33_5
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr62
; GFX11-NEXT: ; implicit-def: $sgpr60
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr57
; GFX11-NEXT: ; implicit-def: $sgpr47
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr56
; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr29
; GFX11-NEXT: ; implicit-def: $sgpr27
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr26
; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr24
; GFX11-NEXT: ; implicit-def: $sgpr22
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr23
; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s0 :: v_dual_mov_b32 v14, s1
; GFX11-NEXT: v_dual_mov_b32 v9, s2 :: v_dual_mov_b32 v10, s3
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v4, s19
; GFX11-NEXT: v_dual_mov_b32 v1, s20 :: v_dual_mov_b32 v2, s21
-; GFX11-NEXT: v_dual_mov_b32 v48, s63 :: v_dual_mov_b32 v39, s61
-; GFX11-NEXT: v_dual_mov_b32 v38, s62 :: v_dual_mov_b32 v37, s60
-; GFX11-NEXT: v_dual_mov_b32 v36, s59 :: v_dual_mov_b32 v35, s58
-; GFX11-NEXT: v_dual_mov_b32 v34, s56 :: v_dual_mov_b32 v33, s57
-; GFX11-NEXT: v_dual_mov_b32 v32, s47 :: v_dual_mov_b32 v31, s46
-; GFX11-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v29, s43
-; GFX11-NEXT: v_dual_mov_b32 v28, s44 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v25, s40
-; GFX11-NEXT: v_dual_mov_b32 v24, s28 :: v_dual_mov_b32 v23, s29
-; GFX11-NEXT: v_dual_mov_b32 v22, s27 :: v_dual_mov_b32 v21, s26
-; GFX11-NEXT: v_dual_mov_b32 v20, s25 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v17, s4
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v15, s8
-; GFX11-NEXT: v_dual_mov_b32 v8, s15 :: v_dual_mov_b32 v11, s10
+; GFX11-NEXT: v_dual_mov_b32 v48, s62 :: v_dual_mov_b32 v39, s60
+; GFX11-NEXT: v_dual_mov_b32 v38, s61 :: v_dual_mov_b32 v37, s59
+; GFX11-NEXT: v_dual_mov_b32 v36, s58 :: v_dual_mov_b32 v35, s57
+; GFX11-NEXT: v_dual_mov_b32 v34, s47 :: v_dual_mov_b32 v33, s56
+; GFX11-NEXT: v_dual_mov_b32 v32, s46 :: v_dual_mov_b32 v31, s45
+; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v29, s42
+; GFX11-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v27, s41
+; GFX11-NEXT: v_dual_mov_b32 v26, s40 :: v_dual_mov_b32 v25, s29
+; GFX11-NEXT: v_dual_mov_b32 v24, s27 :: v_dual_mov_b32 v23, s28
+; GFX11-NEXT: v_dual_mov_b32 v22, s26 :: v_dual_mov_b32 v21, s25
+; GFX11-NEXT: v_dual_mov_b32 v20, s24 :: v_dual_mov_b32 v19, s22
+; GFX11-NEXT: v_dual_mov_b32 v18, s23 :: v_dual_mov_b32 v17, s4
+; GFX11-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v15, s8
+; GFX11-NEXT: v_dual_mov_b32 v8, s14 :: v_dual_mov_b32 v11, s10
; GFX11-NEXT: v_dual_mov_b32 v16, s6 :: v_dual_mov_b32 v7, s12
; GFX11-NEXT: .LBB33_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
@@ -12639,12 +12770,13 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v31, v8
; SI-NEXT: v_mov_b32_e32 v30, v6
; SI-NEXT: v_mov_b32_e32 v29, v4
; SI-NEXT: v_mov_b32_e32 v28, v2
; SI-NEXT: v_mov_b32_e32 v27, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v39, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v38, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v37, 24, v5
@@ -12866,18 +12998,21 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v40i8_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v8
; VI-NEXT: v_mov_b32_e32 v30, v6
; VI-NEXT: v_mov_b32_e32 v29, v4
; VI-NEXT: v_mov_b32_e32 v28, v2
; VI-NEXT: v_mov_b32_e32 v27, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v37, 8, v5
@@ -13047,18 +13182,21 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v40i8_to_v10f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v31, v8
; GFX9-NEXT: v_mov_b32_e32 v30, v6
; GFX9-NEXT: v_mov_b32_e32 v29, v4
; GFX9-NEXT: v_mov_b32_e32 v28, v2
; GFX9-NEXT: v_mov_b32_e32 v27, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v38, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v37, 8, v5
@@ -13229,7 +13367,9 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-LABEL: bitcast_v40i8_to_v10f32_scalar:
; GFX11: ; %bb.0:
@@ -13248,64 +13388,64 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX11-NEXT: v_lshlrev_b32_e32 v15, 8, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v26
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v27
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_or_b32_e32 v0, v0, v32
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v10
; GFX11-NEXT: v_or_b32_e32 v2, v2, v29
; GFX11-NEXT: v_or_b32_e32 v3, v3, v30
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v14
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-NEXT: v_or_b32_e32 v5, v5, v31
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_or_b32_e32 v6, v6, v13
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v25
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v12
; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v16
@@ -13313,8 +13453,8 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX11-NEXT: v_and_b32_e32 v21, 0xff, v20
; GFX11-NEXT: v_and_b32_e32 v34, 0xffff, v6
; GFX11-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-NEXT: v_mov_b32_e32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v24
; GFX11-NEXT: v_or_b32_e32 v1, v1, v28
; GFX11-NEXT: v_or_b32_e32 v5, v5, v11
@@ -13331,11 +13471,10 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX11-NEXT: v_or_b32_e32 v7, v7, v33
; GFX11-NEXT: v_or_b32_e32 v8, v34, v8
; GFX11-NEXT: v_or_b32_e32 v5, v0, v1
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: v_or_b32_e32 v9, v9, v21
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-NEXT: .LBB35_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -13463,7 +13602,9 @@ define inreg <10 x float> @bitcast_v40i8_to_v10f32_scalar(<40 x i8> inreg %a, i3
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-NEXT: s_branch .LBB35_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-NEXT: s_branch .LBB35_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13590,10 +13731,14 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_4
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -13605,8 +13750,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_3:
-; SI-NEXT: s_branch .LBB37_2
; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -13630,10 +13773,14 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -13645,8 +13792,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -13670,10 +13815,14 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -13685,8 +13834,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13714,12 +13861,15 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -13731,8 +13881,6 @@ define inreg <5 x double> @bitcast_v10f32_to_v5f64_scalar(<10 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -13855,18 +14003,20 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -13884,18 +14034,20 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -13913,18 +14065,20 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13946,20 +14100,21 @@ define inreg <10 x float> @bitcast_v5f64_to_v10f32_scalar(<5 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -14093,10 +14248,14 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB41_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB41_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB41_4
-; SI-NEXT: .LBB41_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB41_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB41_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
; SI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -14108,8 +14267,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB41_3:
-; SI-NEXT: s_branch .LBB41_2
; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -14133,10 +14290,14 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
; VI-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -14148,8 +14309,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14173,10 +14332,14 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
; GFX9-NEXT: v_add_f32_e64 v7, s23, 1.0
@@ -14188,8 +14351,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14217,12 +14378,15 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
; GFX11-NEXT: v_add_f32_e64 v7, s19, 1.0
@@ -14234,8 +14398,6 @@ define inreg <5 x i64> @bitcast_v10f32_to_v5i64_scalar(<10 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -14381,10 +14543,14 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB43_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB43_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB43_3
-; SI-NEXT: .LBB43_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB43_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB43_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s24, s24, 3
; SI-NEXT: s_addc_u32 s25, s25, 0
; SI-NEXT: s_add_u32 s22, s22, 3
@@ -14395,7 +14561,7 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB43_3: ; %end
+; SI-NEXT: .LBB43_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -14407,17 +14573,19 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB43_4:
-; SI-NEXT: s_branch .LBB43_2
;
; VI-LABEL: bitcast_v5i64_to_v10f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_3
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s24, s24, 3
; VI-NEXT: s_addc_u32 s25, s25, 0
; VI-NEXT: s_add_u32 s22, s22, 3
@@ -14428,7 +14596,7 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB43_3: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14440,17 +14608,19 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v5i64_to_v10f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_3
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s24, s24, 3
; GFX9-NEXT: s_addc_u32 s25, s25, 0
; GFX9-NEXT: s_add_u32 s22, s22, 3
@@ -14461,7 +14631,7 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB43_3: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -14473,19 +14643,20 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_4:
-; GFX9-NEXT: s_branch .LBB43_2
;
; GFX11-LABEL: bitcast_v5i64_to_v10f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s20, s20, 3
; GFX11-NEXT: s_addc_u32 s21, s21, 0
; GFX11-NEXT: s_add_u32 s18, s18, 3
@@ -14496,7 +14667,7 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB43_3: ; %end
+; GFX11-NEXT: .LBB43_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -14504,8 +14675,6 @@ define inreg <10 x float> @bitcast_v5i64_to_v10f32_scalar(<5 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_4:
-; GFX11-NEXT: s_branch .LBB43_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14776,13 +14945,14 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v22, v5
; SI-NEXT: v_mov_b32_e32 v21, v4
; SI-NEXT: v_mov_b32_e32 v20, v3
; SI-NEXT: v_mov_b32_e32 v25, v2
; SI-NEXT: v_mov_b32_e32 v24, v1
; SI-NEXT: v_mov_b32_e32 v23, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -14870,16 +15040,22 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v20i16_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_3
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
; VI-NEXT: s_add_i32 s7, s17, 3
@@ -14930,7 +15106,7 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB45_3: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -14942,17 +15118,19 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v20i16_to_v20f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -14964,8 +15142,6 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14993,12 +15169,15 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -15010,8 +15189,6 @@ define inreg <20 x half> @bitcast_v20i16_to_v20f16_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -15298,10 +15475,14 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB47_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB47_3
-; SI-NEXT: .LBB47_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB47_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB47_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
@@ -15387,19 +15568,21 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16
-; SI-NEXT: .LBB47_3: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB47_4:
-; SI-NEXT: s_branch .LBB47_2
;
; VI-LABEL: bitcast_v20f16_to_v20i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s24, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v2, s5
@@ -15452,8 +15635,6 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v0, v1
; VI-NEXT: v_or_b32_e32 v0, v10, v11
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -15477,10 +15658,14 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -15493,8 +15678,6 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -15522,12 +15705,15 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -15539,8 +15725,6 @@ define inreg <20 x i16> @bitcast_v20f16_to_v20i16_scalar(<20 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -16773,11 +16957,12 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s72, v6
; SI-NEXT: v_readfirstlane_b32 s73, v5
; SI-NEXT: v_readfirstlane_b32 s62, v2
; SI-NEXT: v_readfirstlane_b32 s63, v1
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v4
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -17096,12 +17281,15 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr15
; SI-NEXT: ; implicit-def: $sgpr40
; SI-NEXT: ; implicit-def: $sgpr41
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v20i16_to_v40i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
+; VI-NEXT: s_mov_b64 s[14:15], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
@@ -17369,12 +17557,15 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr28
; VI-NEXT: ; implicit-def: $sgpr27
; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v20i16_to_v40i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_mov_b64 s[14:15], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
@@ -17481,7 +17672,8 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr29
; GFX9-NEXT: ; implicit-def: $sgpr27
; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v10, s17
@@ -17591,41 +17783,40 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
-; GFX11-NEXT: s_mov_b32 s14, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s15, s21, 24
-; GFX11-NEXT: s_lshr_b32 s22, s21, 16
-; GFX11-NEXT: s_lshr_b32 s24, s21, 8
-; GFX11-NEXT: s_lshr_b32 s23, s20, 16
-; GFX11-NEXT: s_lshr_b32 s25, s20, 8
-; GFX11-NEXT: s_lshr_b32 s26, s19, 24
-; GFX11-NEXT: s_lshr_b32 s27, s19, 16
-; GFX11-NEXT: s_lshr_b32 s29, s19, 8
-; GFX11-NEXT: s_lshr_b32 s28, s18, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 8
-; GFX11-NEXT: s_lshr_b32 s41, s17, 24
-; GFX11-NEXT: s_lshr_b32 s42, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s17, 8
-; GFX11-NEXT: s_lshr_b32 s43, s16, 16
-; GFX11-NEXT: s_lshr_b32 s45, s16, 8
-; GFX11-NEXT: s_lshr_b32 s46, s3, 24
-; GFX11-NEXT: s_lshr_b32 s47, s3, 16
-; GFX11-NEXT: s_lshr_b32 s57, s3, 8
-; GFX11-NEXT: s_lshr_b32 s56, s2, 16
-; GFX11-NEXT: s_lshr_b32 s58, s2, 8
-; GFX11-NEXT: s_lshr_b32 s59, s1, 24
-; GFX11-NEXT: s_lshr_b32 s60, s1, 16
-; GFX11-NEXT: s_lshr_b32 s62, s1, 8
-; GFX11-NEXT: s_lshr_b32 s61, s0, 16
-; GFX11-NEXT: s_lshr_b32 s63, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s14, s21, 24
+; GFX11-NEXT: s_lshr_b32 s15, s21, 16
+; GFX11-NEXT: s_lshr_b32 s23, s21, 8
+; GFX11-NEXT: s_lshr_b32 s22, s20, 16
+; GFX11-NEXT: s_lshr_b32 s24, s20, 8
+; GFX11-NEXT: s_lshr_b32 s25, s19, 24
+; GFX11-NEXT: s_lshr_b32 s26, s19, 16
+; GFX11-NEXT: s_lshr_b32 s28, s19, 8
+; GFX11-NEXT: s_lshr_b32 s27, s18, 16
+; GFX11-NEXT: s_lshr_b32 s29, s18, 8
+; GFX11-NEXT: s_lshr_b32 s40, s17, 24
+; GFX11-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-NEXT: s_lshr_b32 s43, s17, 8
+; GFX11-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-NEXT: s_lshr_b32 s44, s16, 8
+; GFX11-NEXT: s_lshr_b32 s45, s3, 24
+; GFX11-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-NEXT: s_lshr_b32 s56, s3, 8
+; GFX11-NEXT: s_lshr_b32 s47, s2, 16
+; GFX11-NEXT: s_lshr_b32 s57, s2, 8
+; GFX11-NEXT: s_lshr_b32 s58, s1, 24
+; GFX11-NEXT: s_lshr_b32 s59, s1, 16
+; GFX11-NEXT: s_lshr_b32 s61, s1, 8
+; GFX11-NEXT: s_lshr_b32 s60, s0, 16
+; GFX11-NEXT: s_lshr_b32 s62, s0, 8
; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_execnz .LBB49_4
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v6, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v5, s16, 3 op_sel_hi:[1,0]
@@ -17669,57 +17860,58 @@ define inreg <40 x i8> @bitcast_v20i16_to_v40i8_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 8, v13
; GFX11-NEXT: s_branch .LBB49_5
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr62
; GFX11-NEXT: ; implicit-def: $sgpr60
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr57
; GFX11-NEXT: ; implicit-def: $sgpr47
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr56
; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr29
; GFX11-NEXT: ; implicit-def: $sgpr27
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr26
; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr24
; GFX11-NEXT: ; implicit-def: $sgpr22
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr23
; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s0 :: v_dual_mov_b32 v14, s1
; GFX11-NEXT: v_dual_mov_b32 v9, s2 :: v_dual_mov_b32 v10, s3
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v4, s19
; GFX11-NEXT: v_dual_mov_b32 v1, s20 :: v_dual_mov_b32 v2, s21
-; GFX11-NEXT: v_dual_mov_b32 v48, s63 :: v_dual_mov_b32 v39, s61
-; GFX11-NEXT: v_dual_mov_b32 v38, s62 :: v_dual_mov_b32 v37, s60
-; GFX11-NEXT: v_dual_mov_b32 v36, s59 :: v_dual_mov_b32 v35, s58
-; GFX11-NEXT: v_dual_mov_b32 v34, s56 :: v_dual_mov_b32 v33, s57
-; GFX11-NEXT: v_dual_mov_b32 v32, s47 :: v_dual_mov_b32 v31, s46
-; GFX11-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v29, s43
-; GFX11-NEXT: v_dual_mov_b32 v28, s44 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v25, s40
-; GFX11-NEXT: v_dual_mov_b32 v24, s28 :: v_dual_mov_b32 v23, s29
-; GFX11-NEXT: v_dual_mov_b32 v22, s27 :: v_dual_mov_b32 v21, s26
-; GFX11-NEXT: v_dual_mov_b32 v20, s25 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v7, s12
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s10
-; GFX11-NEXT: v_dual_mov_b32 v8, s15 :: v_dual_mov_b32 v15, s8
+; GFX11-NEXT: v_dual_mov_b32 v48, s62 :: v_dual_mov_b32 v39, s60
+; GFX11-NEXT: v_dual_mov_b32 v38, s61 :: v_dual_mov_b32 v37, s59
+; GFX11-NEXT: v_dual_mov_b32 v36, s58 :: v_dual_mov_b32 v35, s57
+; GFX11-NEXT: v_dual_mov_b32 v34, s47 :: v_dual_mov_b32 v33, s56
+; GFX11-NEXT: v_dual_mov_b32 v32, s46 :: v_dual_mov_b32 v31, s45
+; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v29, s42
+; GFX11-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v27, s41
+; GFX11-NEXT: v_dual_mov_b32 v26, s40 :: v_dual_mov_b32 v25, s29
+; GFX11-NEXT: v_dual_mov_b32 v24, s27 :: v_dual_mov_b32 v23, s28
+; GFX11-NEXT: v_dual_mov_b32 v22, s26 :: v_dual_mov_b32 v21, s25
+; GFX11-NEXT: v_dual_mov_b32 v20, s24 :: v_dual_mov_b32 v19, s22
+; GFX11-NEXT: v_dual_mov_b32 v18, s23 :: v_dual_mov_b32 v7, s12
+; GFX11-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v11, s10
+; GFX11-NEXT: v_dual_mov_b32 v8, s14 :: v_dual_mov_b32 v15, s8
; GFX11-NEXT: v_dual_mov_b32 v16, s6 :: v_dual_mov_b32 v17, s4
; GFX11-NEXT: .LBB49_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
@@ -19147,6 +19339,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s14, v19
; SI-NEXT: v_readfirstlane_b32 s40, v18
; SI-NEXT: v_readfirstlane_b32 s12, v11
@@ -19155,7 +19348,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s9, v2
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7
; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5
@@ -19428,12 +19621,15 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v40i8_to_v20i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v14
; VI-NEXT: v_mov_b32_e32 v27, v12
; VI-NEXT: v_mov_b32_e32 v32, v10
@@ -19442,7 +19638,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v30, v4
; VI-NEXT: v_mov_b32_e32 v34, v2
; VI-NEXT: v_mov_b32_e32 v28, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v5
@@ -19458,6 +19654,30 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: v_or_b32_sdwa v0, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v30, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v32, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v27, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v31, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s28, 0xff
+; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v24, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: v_or_b32_sdwa v2, v33, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v3, v29, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v28, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -19485,30 +19705,6 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s6, s6, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s6, s7
-; VI-NEXT: s_and_b32 s7, s28, 0xff
-; VI-NEXT: s_lshl_b32 s8, s29, 8
-; VI-NEXT: s_or_b32 s7, s7, s8
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v28, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_e32 v3, s7, v0
-; VI-NEXT: v_or_b32_sdwa v0, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v30, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v33, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v29, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v32, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v27, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v31, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v24, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
@@ -19612,12 +19808,15 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v40i8_to_v20i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v14
; GFX9-NEXT: v_mov_b32_e32 v33, v12
; GFX9-NEXT: v_mov_b32_e32 v30, v10
@@ -19626,7 +19825,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v34, v4
; GFX9-NEXT: v_mov_b32_e32 v31, v2
; GFX9-NEXT: v_mov_b32_e32 v32, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v35, 8, v5
@@ -19642,57 +19841,57 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: s_and_b32 s4, s28, 0xff
+; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: s_or_b32 s4, s4, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT: v_lshl_or_b32 v3, v0, 16, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s17, 8
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v27, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
+; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s5, s5, s6
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s21, 8
+; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v29, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: s_and_b32 s6, s22, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s6, s6, s7
+; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v18, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6
; GFX9-NEXT: s_and_b32 s6, s24, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s25, 8
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v20, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s26, 0xff
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s28, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s29, 8
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v1, s7, v1
-; GFX9-NEXT: v_lshl_or_b32 v3, v0, 16, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v27, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v29, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v2, v31, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
; GFX9-NEXT: v_or_b32_sdwa v0, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_or_b32 s7, s7, s8
+; GFX9-NEXT: v_or_b32_sdwa v4, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v24, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v2
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
@@ -19798,7 +19997,9 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-LABEL: bitcast_v40i8_to_v20i16_scalar:
; GFX11: ; %bb.0:
@@ -19819,46 +20020,46 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
+; GFX11-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: v_or_b32_e32 v0, v0, v22
-; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s10
+; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s9
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v30
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v24
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v28
; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
@@ -19878,7 +20079,7 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v8, v8, v19
; GFX11-NEXT: v_or_b32_e32 v12, v6, v17
; GFX11-NEXT: v_lshl_or_b32 v6, v0, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: v_or_b32_e32 v1, v1, v32
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v5
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v7
@@ -19887,12 +20088,11 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_lshl_or_b32 v7, v9, 16, v11
; GFX11-NEXT: v_lshl_or_b32 v8, v12, 16, v13
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-NEXT: v_mov_b32_e32 v3, s7
; GFX11-NEXT: v_lshl_or_b32 v9, v10, 16, v14
; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v27
; GFX11-NEXT: s_add_i32 s28, s28, 3
@@ -20005,7 +20205,9 @@ define inreg <20 x i16> @bitcast_v40i8_to_v20i16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB51_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20256,10 +20458,11 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v4
; SI-NEXT: v_mov_b32_e32 v17, v2
; SI-NEXT: v_mov_b32_e32 v18, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v5
@@ -20359,16 +20562,22 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB53_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v20i16_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s25, 3
; VI-NEXT: s_and_b32 s4, s25, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -20419,7 +20628,7 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -20431,17 +20640,19 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v20i16_to_v5f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -20453,8 +20664,6 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -20482,12 +20691,15 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -20499,8 +20711,6 @@ define inreg <5 x double> @bitcast_v20i16_to_v5f64_scalar(<20 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -20674,6 +20884,7 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s24
@@ -20720,7 +20931,8 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v5, s19
@@ -20754,18 +20966,20 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -20789,18 +21003,20 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -20828,20 +21044,21 @@ define inreg <20 x i16> @bitcast_v5f64_to_v20i16_scalar(<5 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -21102,10 +21319,11 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v4
; SI-NEXT: v_mov_b32_e32 v17, v2
; SI-NEXT: v_mov_b32_e32 v18, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v5
@@ -21205,16 +21423,22 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB57_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v20i16_to_v5i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s25, 3
; VI-NEXT: s_and_b32 s4, s25, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -21265,7 +21489,7 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -21277,17 +21501,19 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v20i16_to_v5i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v7, s23, 3 op_sel_hi:[1,0]
@@ -21299,8 +21525,6 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -21328,12 +21552,15 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s19, 3 op_sel_hi:[1,0]
@@ -21345,8 +21572,6 @@ define inreg <5 x i64> @bitcast_v20i16_to_v5i64_scalar(<20 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -21535,6 +21760,7 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB59_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s24
@@ -21607,16 +21833,22 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB59_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB59_2
+; SI-NEXT: s_branch .LBB59_3
;
; VI-LABEL: bitcast_v5i64_to_v20i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_3
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s24, s24, 3
; VI-NEXT: s_addc_u32 s25, s25, 0
; VI-NEXT: s_add_u32 s22, s22, 3
@@ -21627,7 +21859,7 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB59_3: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -21639,17 +21871,19 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v5i64_to_v20i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_3
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s24, s24, 3
; GFX9-NEXT: s_addc_u32 s25, s25, 0
; GFX9-NEXT: s_add_u32 s22, s22, 3
@@ -21660,7 +21894,7 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB59_3: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -21672,19 +21906,20 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_4:
-; GFX9-NEXT: s_branch .LBB59_2
;
; GFX11-LABEL: bitcast_v5i64_to_v20i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s20, s20, 3
; GFX11-NEXT: s_addc_u32 s21, s21, 0
; GFX11-NEXT: s_add_u32 s18, s18, 3
@@ -21695,7 +21930,7 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB59_3: ; %end
+; GFX11-NEXT: .LBB59_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -21703,8 +21938,6 @@ define inreg <20 x i16> @bitcast_v5i64_to_v20i16_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_4:
-; GFX11-NEXT: s_branch .LBB59_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22905,14 +23138,14 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v15, s16
; SI-NEXT: v_cvt_f16_f32_e32 v10, s19
; SI-NEXT: v_cvt_f16_f32_e32 v12, s18
-; SI-NEXT: v_cvt_f16_f32_e32 v39, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v33, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v37, s21
+; SI-NEXT: v_cvt_f16_f32_e32 v30, s20
; SI-NEXT: v_cvt_f16_f32_e32 v9, s23
; SI-NEXT: v_cvt_f16_f32_e32 v20, s22
; SI-NEXT: v_cvt_f16_f32_e32 v54, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v53, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v52, s24
; SI-NEXT: v_cvt_f16_f32_e32 v8, s27
-; SI-NEXT: v_cvt_f16_f32_e32 v50, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v49, s26
; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_cvt_f16_f32_e32 v43, s29
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
@@ -22925,20 +23158,21 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v44, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB61_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v16
; SI-NEXT: v_or_b32_e32 v28, v15, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v10
; SI-NEXT: v_or_b32_e32 v24, v12, v3
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v39
-; SI-NEXT: v_or_b32_e32 v14, v33, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v37
+; SI-NEXT: v_or_b32_e32 v14, v30, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v9
; SI-NEXT: v_or_b32_e32 v13, v20, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v54
-; SI-NEXT: v_or_b32_e32 v7, v53, v3
+; SI-NEXT: v_or_b32_e32 v7, v52, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v8
-; SI-NEXT: v_or_b32_e32 v11, v50, v3
+; SI-NEXT: v_or_b32_e32 v11, v49, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v43
; SI-NEXT: v_or_b32_e32 v5, v44, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v2
@@ -22947,15 +23181,15 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v4, v46, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v1
; SI-NEXT: v_or_b32_e32 v3, v45, v3
-; SI-NEXT: v_alignbit_b32 v30, v24, v28, 24
+; SI-NEXT: v_alignbit_b32 v31, v24, v28, 24
; SI-NEXT: v_alignbit_b32 v35, v24, v28, 16
-; SI-NEXT: v_alignbit_b32 v37, v24, v28, 8
+; SI-NEXT: v_alignbit_b32 v38, v24, v28, 8
; SI-NEXT: v_alignbit_b32 v29, v13, v14, 24
-; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16
+; SI-NEXT: v_alignbit_b32 v32, v13, v14, 16
; SI-NEXT: v_alignbit_b32 v36, v13, v14, 8
; SI-NEXT: v_alignbit_b32 v23, v11, v7, 24
; SI-NEXT: v_alignbit_b32 v26, v11, v7, 16
-; SI-NEXT: v_alignbit_b32 v32, v11, v7, 8
+; SI-NEXT: v_alignbit_b32 v33, v11, v7, 8
; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24
; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16
; SI-NEXT: v_alignbit_b32 v27, v6, v5, 8
@@ -22963,9 +23197,9 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_alignbit_b32 v18, v3, v4, 16
; SI-NEXT: v_alignbit_b32 v22, v3, v4, 8
; SI-NEXT: v_lshrrev_b32_e32 v40, 8, v24
-; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v13
-; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v11
-; SI-NEXT: v_lshrrev_b32_e32 v38, 8, v6
+; SI-NEXT: v_lshrrev_b32_e32 v53, 8, v13
+; SI-NEXT: v_lshrrev_b32_e32 v50, 8, v11
+; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v6
; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v3
; SI-NEXT: v_bfe_u32 v42, v10, 8, 8
; SI-NEXT: v_bfe_u32 v55, v9, 8, 8
@@ -23006,22 +23240,22 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v52
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v8, v8
; SI-NEXT: v_or_b32_e32 v6, v7, v6
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v50
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v49
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v30
; SI-NEXT: v_or_b32_e32 v7, v13, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v39
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v37
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v8
; SI-NEXT: v_cvt_f32_f16_e32 v9, v9
@@ -23054,15 +23288,15 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v10
; SI-NEXT: v_or_b32_e32 v13, v17, v13
; SI-NEXT: v_or_b32_e32 v24, v12, v15
-; SI-NEXT: v_alignbit_b32 v30, v24, v28, 24
+; SI-NEXT: v_alignbit_b32 v31, v24, v28, 24
; SI-NEXT: v_alignbit_b32 v35, v24, v28, 16
-; SI-NEXT: v_alignbit_b32 v37, v24, v28, 8
+; SI-NEXT: v_alignbit_b32 v38, v24, v28, 8
; SI-NEXT: v_alignbit_b32 v29, v13, v14, 24
-; SI-NEXT: v_alignbit_b32 v31, v13, v14, 16
+; SI-NEXT: v_alignbit_b32 v32, v13, v14, 16
; SI-NEXT: v_alignbit_b32 v36, v13, v14, 8
; SI-NEXT: v_alignbit_b32 v23, v11, v7, 24
; SI-NEXT: v_alignbit_b32 v26, v11, v7, 16
-; SI-NEXT: v_alignbit_b32 v32, v11, v7, 8
+; SI-NEXT: v_alignbit_b32 v33, v11, v7, 8
; SI-NEXT: v_alignbit_b32 v19, v6, v5, 24
; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16
; SI-NEXT: v_alignbit_b32 v27, v6, v5, 8
@@ -23070,9 +23304,9 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_alignbit_b32 v18, v3, v4, 16
; SI-NEXT: v_alignbit_b32 v22, v3, v4, 8
; SI-NEXT: v_lshrrev_b32_e32 v40, 8, v24
-; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v13
-; SI-NEXT: v_lshrrev_b32_e32 v49, 8, v11
-; SI-NEXT: v_lshrrev_b32_e32 v38, 8, v6
+; SI-NEXT: v_lshrrev_b32_e32 v53, 8, v13
+; SI-NEXT: v_lshrrev_b32_e32 v50, 8, v11
+; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v6
; SI-NEXT: v_lshrrev_b32_e32 v25, 8, v3
; SI-NEXT: v_bfe_u32 v42, v10, 8, 8
; SI-NEXT: v_bfe_u32 v55, v9, 8, 8
@@ -23081,11 +23315,11 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_bfe_u32 v34, v1, 8, 8
; SI-NEXT: .LBB61_3: ; %end
; SI-NEXT: v_and_b32_e32 v12, 0xff, v28
-; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v37
+; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v38
; SI-NEXT: v_or_b32_e32 v12, v12, v15
; SI-NEXT: v_and_b32_e32 v15, 0xff, v35
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
-; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v30
+; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v31
; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12
; SI-NEXT: v_or_b32_e32 v15, v16, v15
; SI-NEXT: v_or_b32_e32 v12, v12, v15
@@ -23106,7 +23340,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_and_b32_e32 v10, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v36
; SI-NEXT: v_or_b32_e32 v10, v10, v12
-; SI-NEXT: v_and_b32_e32 v12, 0xff, v31
+; SI-NEXT: v_and_b32_e32 v12, 0xff, v32
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v29
; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
@@ -23116,7 +23350,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v10, v12, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v10, 0xff, v13
-; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v52
+; SI-NEXT: v_lshlrev_b32_e32 v12, 8, v53
; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
; SI-NEXT: v_or_b32_e32 v10, v10, v12
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
@@ -23128,7 +23362,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v32
+; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v33
; SI-NEXT: v_or_b32_e32 v7, v7, v9
; SI-NEXT: v_and_b32_e32 v9, 0xff, v26
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
@@ -23140,7 +23374,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v7, v9, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xff, v11
-; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v49
+; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v50
; SI-NEXT: v_and_b32_e32 v8, 0xff, v8
; SI-NEXT: v_or_b32_e32 v7, v7, v9
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
@@ -23164,7 +23398,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v5, v7, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xff, v6
-; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v38
+; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v39
; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
@@ -23210,32 +23444,32 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB61_4:
; SI-NEXT: ; implicit-def: $vgpr28
-; SI-NEXT: ; implicit-def: $vgpr37
+; SI-NEXT: ; implicit-def: $vgpr38
; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: ; implicit-def: $vgpr30
+; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; implicit-def: $vgpr24
; SI-NEXT: ; implicit-def: $vgpr40
; SI-NEXT: ; implicit-def: $vgpr42
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr36
-; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr52
+; SI-NEXT: ; implicit-def: $vgpr53
; SI-NEXT: ; implicit-def: $vgpr55
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: ; implicit-def: $vgpr32
+; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: ; implicit-def: $vgpr49
+; SI-NEXT: ; implicit-def: $vgpr50
; SI-NEXT: ; implicit-def: $vgpr51
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr19
; SI-NEXT: ; implicit-def: $vgpr6
-; SI-NEXT: ; implicit-def: $vgpr38
+; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr22
@@ -23244,12 +23478,15 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr34
-; SI-NEXT: s_branch .LBB61_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB61_2
+; SI-NEXT: s_branch .LBB61_3
;
; VI-LABEL: bitcast_v20f16_to_v40i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
+; VI-NEXT: s_mov_b64 s[14:15], -1
; VI-NEXT: s_cbranch_scc0 .LBB61_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s41, s25, 24
@@ -23387,7 +23624,8 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr26
; VI-NEXT: ; implicit-def: $sgpr59
; VI-NEXT: ; implicit-def: $sgpr41
-; VI-NEXT: s_branch .LBB61_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; VI-NEXT: s_cbranch_vccz .LBB61_2
; VI-NEXT: .LBB61_4:
; VI-NEXT: v_mov_b32_e32 v12, s76
; VI-NEXT: v_mov_b32_e32 v8, s75
@@ -23506,6 +23744,7 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_mov_b64 s[14:15], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB61_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
@@ -23613,7 +23852,8 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr29
; GFX9-NEXT: ; implicit-def: $sgpr27
; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB61_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GFX9-NEXT: s_cbranch_vccz .LBB61_2
; GFX9-NEXT: .LBB61_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v10, s17
@@ -23723,41 +23963,40 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
-; GFX11-NEXT: s_mov_b32 s14, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB61_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s15, s21, 24
-; GFX11-NEXT: s_lshr_b32 s22, s21, 16
-; GFX11-NEXT: s_lshr_b32 s24, s21, 8
-; GFX11-NEXT: s_lshr_b32 s23, s20, 16
-; GFX11-NEXT: s_lshr_b32 s25, s20, 8
-; GFX11-NEXT: s_lshr_b32 s26, s19, 24
-; GFX11-NEXT: s_lshr_b32 s27, s19, 16
-; GFX11-NEXT: s_lshr_b32 s29, s19, 8
-; GFX11-NEXT: s_lshr_b32 s28, s18, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 8
-; GFX11-NEXT: s_lshr_b32 s41, s17, 24
-; GFX11-NEXT: s_lshr_b32 s42, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s17, 8
-; GFX11-NEXT: s_lshr_b32 s43, s16, 16
-; GFX11-NEXT: s_lshr_b32 s45, s16, 8
-; GFX11-NEXT: s_lshr_b32 s46, s3, 24
-; GFX11-NEXT: s_lshr_b32 s47, s3, 16
-; GFX11-NEXT: s_lshr_b32 s57, s3, 8
-; GFX11-NEXT: s_lshr_b32 s56, s2, 16
-; GFX11-NEXT: s_lshr_b32 s58, s2, 8
-; GFX11-NEXT: s_lshr_b32 s59, s1, 24
-; GFX11-NEXT: s_lshr_b32 s60, s1, 16
-; GFX11-NEXT: s_lshr_b32 s62, s1, 8
-; GFX11-NEXT: s_lshr_b32 s61, s0, 16
-; GFX11-NEXT: s_lshr_b32 s63, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s14, s21, 24
+; GFX11-NEXT: s_lshr_b32 s15, s21, 16
+; GFX11-NEXT: s_lshr_b32 s23, s21, 8
+; GFX11-NEXT: s_lshr_b32 s22, s20, 16
+; GFX11-NEXT: s_lshr_b32 s24, s20, 8
+; GFX11-NEXT: s_lshr_b32 s25, s19, 24
+; GFX11-NEXT: s_lshr_b32 s26, s19, 16
+; GFX11-NEXT: s_lshr_b32 s28, s19, 8
+; GFX11-NEXT: s_lshr_b32 s27, s18, 16
+; GFX11-NEXT: s_lshr_b32 s29, s18, 8
+; GFX11-NEXT: s_lshr_b32 s40, s17, 24
+; GFX11-NEXT: s_lshr_b32 s41, s17, 16
+; GFX11-NEXT: s_lshr_b32 s43, s17, 8
+; GFX11-NEXT: s_lshr_b32 s42, s16, 16
+; GFX11-NEXT: s_lshr_b32 s44, s16, 8
+; GFX11-NEXT: s_lshr_b32 s45, s3, 24
+; GFX11-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-NEXT: s_lshr_b32 s56, s3, 8
+; GFX11-NEXT: s_lshr_b32 s47, s2, 16
+; GFX11-NEXT: s_lshr_b32 s57, s2, 8
+; GFX11-NEXT: s_lshr_b32 s58, s1, 24
+; GFX11-NEXT: s_lshr_b32 s59, s1, 16
+; GFX11-NEXT: s_lshr_b32 s61, s1, 8
+; GFX11-NEXT: s_lshr_b32 s60, s0, 16
+; GFX11-NEXT: s_lshr_b32 s62, s0, 8
; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: s_cbranch_execnz .LBB61_4
; GFX11-NEXT: .LBB61_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s16 op_sel_hi:[0,1]
@@ -23801,57 +24040,58 @@ define inreg <40 x i8> @bitcast_v20f16_to_v40i8_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 8, v13
; GFX11-NEXT: s_branch .LBB61_5
; GFX11-NEXT: .LBB61_3:
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr61
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr62
; GFX11-NEXT: ; implicit-def: $sgpr60
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr57
; GFX11-NEXT: ; implicit-def: $sgpr47
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr56
; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: ; implicit-def: $sgpr28
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr29
; GFX11-NEXT: ; implicit-def: $sgpr27
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr26
; GFX11-NEXT: ; implicit-def: $sgpr25
-; GFX11-NEXT: ; implicit-def: $sgpr23
-; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr24
; GFX11-NEXT: ; implicit-def: $sgpr22
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr23
; GFX11-NEXT: ; implicit-def: $sgpr15
-; GFX11-NEXT: s_branch .LBB61_2
+; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB61_2
; GFX11-NEXT: .LBB61_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s0 :: v_dual_mov_b32 v14, s1
; GFX11-NEXT: v_dual_mov_b32 v9, s2 :: v_dual_mov_b32 v10, s3
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v6, s17
; GFX11-NEXT: v_dual_mov_b32 v3, s18 :: v_dual_mov_b32 v4, s19
; GFX11-NEXT: v_dual_mov_b32 v1, s20 :: v_dual_mov_b32 v2, s21
-; GFX11-NEXT: v_dual_mov_b32 v48, s63 :: v_dual_mov_b32 v39, s61
-; GFX11-NEXT: v_dual_mov_b32 v38, s62 :: v_dual_mov_b32 v37, s60
-; GFX11-NEXT: v_dual_mov_b32 v36, s59 :: v_dual_mov_b32 v35, s58
-; GFX11-NEXT: v_dual_mov_b32 v34, s56 :: v_dual_mov_b32 v33, s57
-; GFX11-NEXT: v_dual_mov_b32 v32, s47 :: v_dual_mov_b32 v31, s46
-; GFX11-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v29, s43
-; GFX11-NEXT: v_dual_mov_b32 v28, s44 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v25, s40
-; GFX11-NEXT: v_dual_mov_b32 v24, s28 :: v_dual_mov_b32 v23, s29
-; GFX11-NEXT: v_dual_mov_b32 v22, s27 :: v_dual_mov_b32 v21, s26
-; GFX11-NEXT: v_dual_mov_b32 v20, s25 :: v_dual_mov_b32 v19, s23
-; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v7, s12
-; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s10
-; GFX11-NEXT: v_dual_mov_b32 v8, s15 :: v_dual_mov_b32 v15, s8
+; GFX11-NEXT: v_dual_mov_b32 v48, s62 :: v_dual_mov_b32 v39, s60
+; GFX11-NEXT: v_dual_mov_b32 v38, s61 :: v_dual_mov_b32 v37, s59
+; GFX11-NEXT: v_dual_mov_b32 v36, s58 :: v_dual_mov_b32 v35, s57
+; GFX11-NEXT: v_dual_mov_b32 v34, s47 :: v_dual_mov_b32 v33, s56
+; GFX11-NEXT: v_dual_mov_b32 v32, s46 :: v_dual_mov_b32 v31, s45
+; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v29, s42
+; GFX11-NEXT: v_dual_mov_b32 v28, s43 :: v_dual_mov_b32 v27, s41
+; GFX11-NEXT: v_dual_mov_b32 v26, s40 :: v_dual_mov_b32 v25, s29
+; GFX11-NEXT: v_dual_mov_b32 v24, s27 :: v_dual_mov_b32 v23, s28
+; GFX11-NEXT: v_dual_mov_b32 v22, s26 :: v_dual_mov_b32 v21, s25
+; GFX11-NEXT: v_dual_mov_b32 v20, s24 :: v_dual_mov_b32 v19, s22
+; GFX11-NEXT: v_dual_mov_b32 v18, s23 :: v_dual_mov_b32 v7, s12
+; GFX11-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v11, s10
+; GFX11-NEXT: v_dual_mov_b32 v8, s14 :: v_dual_mov_b32 v15, s8
; GFX11-NEXT: v_dual_mov_b32 v16, s6 :: v_dual_mov_b32 v17, s4
; GFX11-NEXT: .LBB61_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v13, 0xff, v13
@@ -25240,6 +25480,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s62, v25
; SI-NEXT: v_readfirstlane_b32 s63, v24
; SI-NEXT: v_readfirstlane_b32 s60, v23
@@ -25261,12 +25502,12 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s8, v7
; SI-NEXT: v_readfirstlane_b32 s11, v6
; SI-NEXT: v_readfirstlane_b32 s6, v5
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s40, v4
; SI-NEXT: v_readfirstlane_b32 s10, v3
; SI-NEXT: v_readfirstlane_b32 s13, v2
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: v_readfirstlane_b32 s9, v0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB63_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -25494,12 +25735,15 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_branch .LBB63_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB63_2
+; SI-NEXT: s_branch .LBB63_3
;
; VI-LABEL: bitcast_v40i8_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v31, v14
; VI-NEXT: v_mov_b32_e32 v27, v12
; VI-NEXT: v_mov_b32_e32 v32, v10
@@ -25508,7 +25752,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v30, v4
; VI-NEXT: v_mov_b32_e32 v34, v2
; VI-NEXT: v_mov_b32_e32 v28, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v36, 8, v5
@@ -25524,6 +25768,30 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v25
; VI-NEXT: s_cbranch_scc0 .LBB63_4
; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: v_or_b32_sdwa v0, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v30, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v32, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v27, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v31, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s28, 0xff
+; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v24, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: v_or_b32_sdwa v2, v33, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v3, v29, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v28, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -25551,30 +25819,6 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s6, s6, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s6, s7
-; VI-NEXT: s_and_b32 s7, s28, 0xff
-; VI-NEXT: s_lshl_b32 s8, s29, 8
-; VI-NEXT: s_or_b32 s7, s7, s8
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v28, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_e32 v3, s7, v0
-; VI-NEXT: v_or_b32_sdwa v0, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v30, v36 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v33, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v29, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v32, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v27, v48 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v31, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v24, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
@@ -25678,12 +25922,15 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB63_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB63_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB63_2
+; VI-NEXT: s_branch .LBB63_3
;
; GFX9-LABEL: bitcast_v40i8_to_v20f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v14
; GFX9-NEXT: v_mov_b32_e32 v33, v12
; GFX9-NEXT: v_mov_b32_e32 v30, v10
@@ -25692,7 +25939,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v34, v4
; GFX9-NEXT: v_mov_b32_e32 v31, v2
; GFX9-NEXT: v_mov_b32_e32 v32, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v26, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v36, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v35, 8, v5
@@ -25708,57 +25955,57 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX9-NEXT: s_cbranch_scc0 .LBB63_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: s_and_b32 s4, s28, 0xff
+; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: s_or_b32 s4, s4, s5
+; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_e32 v1, s4, v1
+; GFX9-NEXT: v_lshl_or_b32 v3, v0, 16, v1
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s17, 8
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v27, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
+; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s5, s5, s6
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s21, 8
+; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v29, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: s_and_b32 s6, s22, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s23, 8
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s6, s6, s7
+; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
+; GFX9-NEXT: v_or_b32_sdwa v0, v18, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_pack_ll_b32_b16 s5, s5, s6
; GFX9-NEXT: s_and_b32 s6, s24, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s25, 8
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v20, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s26, 0xff
; GFX9-NEXT: s_lshl_b32 s8, s27, 8
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s28, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s29, 8
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: v_mov_b32_e32 v1, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v1, s7, v1
-; GFX9-NEXT: v_lshl_or_b32 v3, v0, 16, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v4, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v28, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v27, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v5, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v6, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v29, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_lshl_or_b32 v7, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v2, v31, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
; GFX9-NEXT: v_or_b32_sdwa v0, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_or_b32 s7, s7, s8
+; GFX9-NEXT: v_or_b32_sdwa v4, v34, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; GFX9-NEXT: v_or_b32_sdwa v1, v24, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v2
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
@@ -25864,7 +26111,9 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB63_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB63_2
+; GFX9-NEXT: s_branch .LBB63_3
;
; GFX11-LABEL: bitcast_v40i8_to_v20f16_scalar:
; GFX11: ; %bb.0:
@@ -25885,46 +26134,46 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB63_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v23
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
-; GFX11-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s12, s29, 8
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_or_b32 s10, s11, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
+; GFX11-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_or_b32 s9, s10, s11
; GFX11-NEXT: v_or_b32_e32 v0, v0, v22
-; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s10
+; GFX11-NEXT: v_and_b32_e64 v2, 0xffff, s9
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v30
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v24
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v28
; GFX11-NEXT: v_lshl_or_b32 v4, v0, 16, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
@@ -25944,7 +26193,7 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v8, v8, v19
; GFX11-NEXT: v_or_b32_e32 v12, v6, v17
; GFX11-NEXT: v_lshl_or_b32 v6, v0, 16, v3
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: v_or_b32_e32 v1, v1, v32
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v5
; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v7
@@ -25953,12 +26202,11 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_lshl_or_b32 v7, v9, 16, v11
; GFX11-NEXT: v_lshl_or_b32 v8, v12, 16, v13
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
+; GFX11-NEXT: v_mov_b32_e32 v3, s7
; GFX11-NEXT: v_lshl_or_b32 v9, v10, 16, v14
; GFX11-NEXT: v_lshl_or_b32 v5, v2, 16, v1
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB63_3
+; GFX11-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: s_cbranch_execnz .LBB63_3
; GFX11-NEXT: .LBB63_2: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v4, 3, v27
; GFX11-NEXT: s_add_i32 s28, s28, 3
@@ -26071,7 +26319,9 @@ define inreg <20 x half> @bitcast_v40i8_to_v20f16_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB63_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB63_2
+; GFX11-NEXT: s_branch .LBB63_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -26385,6 +26635,7 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v16, v4
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB65_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -26427,65 +26678,65 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v1, v3, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v31
; SI-NEXT: v_cvt_f32_f16_e32 v3, v30
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v28
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v28
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v26
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v27
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v29
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT: v_or_b32_e32 v3, v5, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v25
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_or_b32_e32 v4, v6, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v24
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v25
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v7, v23
-; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v27
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f32_f16_e32 v8, v22
-; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v21
; SI-NEXT: v_cvt_f32_f16_e32 v9, v20
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v17
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v5, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v24
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
+; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v16
+; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
+; SI-NEXT: v_or_b32_e32 v5, v5, v6
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v21
; SI-NEXT: v_or_b32_e32 v6, v8, v6
; SI-NEXT: v_cvt_f32_f16_e32 v8, v19
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; SI-NEXT: v_or_b32_e32 v7, v9, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v18
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
-; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; SI-NEXT: v_or_b32_e32 v7, v9, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v18
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_or_b32_e32 v8, v9, v8
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v10
; SI-NEXT: v_or_b32_e32 v9, v11, v9
@@ -26493,16 +26744,22 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB65_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB65_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB65_2
+; SI-NEXT: s_branch .LBB65_3
;
; VI-LABEL: bitcast_v20f16_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_4
-; VI-NEXT: .LBB65_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB65_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB65_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -26555,8 +26812,6 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_3:
-; VI-NEXT: s_branch .LBB65_2
; VI-NEXT: .LBB65_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -26580,10 +26835,14 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_4
-; GFX9-NEXT: .LBB65_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB65_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -26596,8 +26855,6 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_3:
-; GFX9-NEXT: s_branch .LBB65_2
; GFX9-NEXT: .LBB65_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -26625,12 +26882,15 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB65_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
-; GFX11-NEXT: .LBB65_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -26642,8 +26902,6 @@ define inreg <5 x double> @bitcast_v20f16_to_v5f64_scalar(<20 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_3:
-; GFX11-NEXT: s_branch .LBB65_2
; GFX11-NEXT: .LBB65_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -26864,6 +27122,7 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB67_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s25, 16
@@ -26956,24 +27215,28 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_branch .LBB67_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB67_2
+; SI-NEXT: s_branch .LBB67_3
;
; VI-LABEL: bitcast_v5f64_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
-; VI-NEXT: .LBB67_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB67_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB67_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -26997,18 +27260,20 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
-; GFX9-NEXT: .LBB67_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB67_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -27036,20 +27301,21 @@ define inreg <20 x half> @bitcast_v5f64_to_v20f16_scalar(<5 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB67_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
; GFX11-NEXT: .LBB67_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -27373,6 +27639,7 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v16, v4
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB69_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -27415,65 +27682,65 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v3, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v31
; SI-NEXT: v_cvt_f32_f16_e32 v3, v30
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v29
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v28
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v28
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v26
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v27
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v29
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT: v_or_b32_e32 v3, v5, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v25
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_or_b32_e32 v4, v6, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v24
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v25
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v7, v23
-; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v3
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v27
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f32_f16_e32 v8, v22
-; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v21
; SI-NEXT: v_cvt_f32_f16_e32 v9, v20
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v17
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v5, v4
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v24
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
+; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v16
+; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
+; SI-NEXT: v_or_b32_e32 v5, v5, v6
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v21
; SI-NEXT: v_or_b32_e32 v6, v8, v6
; SI-NEXT: v_cvt_f32_f16_e32 v8, v19
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
-; SI-NEXT: v_or_b32_e32 v7, v9, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v18
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v16
+; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
-; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
+; SI-NEXT: v_or_b32_e32 v7, v9, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v18
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_or_b32_e32 v8, v9, v8
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v10
; SI-NEXT: v_or_b32_e32 v9, v11, v9
@@ -27481,16 +27748,22 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB69_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB69_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB69_2
+; SI-NEXT: s_branch .LBB69_3
;
; VI-LABEL: bitcast_v20f16_to_v5i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB69_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB69_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB69_4
-; VI-NEXT: .LBB69_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB69_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB69_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -27543,8 +27816,6 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v10
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB69_3:
-; VI-NEXT: s_branch .LBB69_2
; VI-NEXT: .LBB69_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -27568,10 +27839,14 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB69_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB69_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB69_4
-; GFX9-NEXT: .LBB69_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB69_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB69_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v8, s24, v0 op_sel_hi:[1,0]
@@ -27584,8 +27859,6 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB69_3:
-; GFX9-NEXT: s_branch .LBB69_2
; GFX9-NEXT: .LBB69_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -27613,12 +27886,15 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB69_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB69_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB69_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB69_4
-; GFX11-NEXT: .LBB69_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s19 op_sel_hi:[0,1]
@@ -27630,8 +27906,6 @@ define inreg <5 x i64> @bitcast_v20f16_to_v5i64_scalar(<20 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB69_3:
-; GFX11-NEXT: s_branch .LBB69_2
; GFX11-NEXT: .LBB69_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -27884,6 +28158,7 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB71_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s25, 16
@@ -27981,16 +28256,22 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: s_branch .LBB71_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB71_2
+; SI-NEXT: s_branch .LBB71_3
;
; VI-LABEL: bitcast_v5i64_to_v20f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB71_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB71_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB71_3
-; VI-NEXT: .LBB71_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB71_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB71_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s24, s24, 3
; VI-NEXT: s_addc_u32 s25, s25, 0
; VI-NEXT: s_add_u32 s22, s22, 3
@@ -28001,7 +28282,7 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB71_3: ; %end
+; VI-NEXT: .LBB71_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -28013,17 +28294,19 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB71_4:
-; VI-NEXT: s_branch .LBB71_2
;
; GFX9-LABEL: bitcast_v5i64_to_v20f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB71_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB71_3
-; GFX9-NEXT: .LBB71_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB71_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB71_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s24, s24, 3
; GFX9-NEXT: s_addc_u32 s25, s25, 0
; GFX9-NEXT: s_add_u32 s22, s22, 3
@@ -28034,7 +28317,7 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB71_3: ; %end
+; GFX9-NEXT: .LBB71_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -28046,19 +28329,20 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: s_branch .LBB71_2
;
; GFX11-LABEL: bitcast_v5i64_to_v20f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB71_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB71_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
-; GFX11-NEXT: .LBB71_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB71_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s20, s20, 3
; GFX11-NEXT: s_addc_u32 s21, s21, 0
; GFX11-NEXT: s_add_u32 s18, s18, 3
@@ -28069,7 +28353,7 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB71_3: ; %end
+; GFX11-NEXT: .LBB71_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -28077,8 +28361,6 @@ define inreg <20 x half> @bitcast_v5i64_to_v20f16_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: s_branch .LBB71_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29464,6 +29746,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v34, v14
; SI-NEXT: v_mov_b32_e32 v33, v12
; SI-NEXT: v_mov_b32_e32 v32, v10
@@ -29472,7 +29755,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v29, v4
; SI-NEXT: v_mov_b32_e32 v27, v2
; SI-NEXT: v_mov_b32_e32 v28, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v50, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v49, 24, v5
@@ -29488,41 +29771,6 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v25
; SI-NEXT: s_cbranch_scc0 .LBB73_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_and_b32 s4, s16, 0xff
-; SI-NEXT: s_lshl_b32 s5, s17, 8
-; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: s_and_b32 s5, s18, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s6, s19, 24
-; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: s_and_b32 s5, s20, 0xff
-; SI-NEXT: s_lshl_b32 s6, s21, 8
-; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_and_b32 s6, s22, 0xff
-; SI-NEXT: s_lshl_b32 s6, s6, 16
-; SI-NEXT: s_lshl_b32 s7, s23, 24
-; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_and_b32 s6, s24, 0xff
-; SI-NEXT: s_lshl_b32 s7, s25, 8
-; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: s_and_b32 s7, s26, 0xff
-; SI-NEXT: s_lshl_b32 s7, s7, 16
-; SI-NEXT: s_lshl_b32 s8, s27, 24
-; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s7, s8, s7
-; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: s_and_b32 s7, s28, 0xff
-; SI-NEXT: s_lshl_b32 s8, s29, 8
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v28
-; SI-NEXT: s_or_b32 s7, s7, s8
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_and_b32 s7, s7, 0xffff
-; SI-NEXT: v_or_b32_e32 v0, v51, v0
-; SI-NEXT: v_or_b32_e32 v3, s7, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v27
; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
; SI-NEXT: v_or_b32_e32 v0, v0, v50
@@ -29530,13 +29778,6 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v49, v1
; SI-NEXT: v_or_b32_e32 v4, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v30
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v31
-; SI-NEXT: v_or_b32_e32 v0, v0, v48
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v39, v1
-; SI-NEXT: v_or_b32_e32 v5, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
; SI-NEXT: v_or_b32_e32 v0, v0, v38
@@ -29564,72 +29805,78 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v17, v1
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v30
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v31
; SI-NEXT: v_or_b32_e32 v9, v0, v1
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_mov_b32_e32 v2, s6
-; SI-NEXT: s_cbranch_execnz .LBB73_3
-; SI-NEXT: .LBB73_2: ; %cmp.true
-; SI-NEXT: s_add_i32 s16, s16, 3
+; SI-NEXT: s_and_b32 s4, s28, 0xff
+; SI-NEXT: s_lshl_b32 s5, s29, 8
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v28
+; SI-NEXT: v_or_b32_e32 v2, v2, v48
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_or_b32_e32 v3, v39, v3
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
+; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_or_b32_e32 v5, v2, v3
+; SI-NEXT: v_or_b32_e32 v3, s4, v0
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
-; SI-NEXT: s_add_i32 s18, s18, 3
-; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_and_b32 s6, s18, 0xff
-; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s19, 24
-; SI-NEXT: s_lshl_b32 s6, s6, 16
+; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: s_and_b32 s5, s18, 0xff
+; SI-NEXT: s_lshl_b32 s5, s5, 16
+; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_add_i32 s20, s20, 3
-; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s20, 0xff
; SI-NEXT: s_lshl_b32 s6, s21, 8
-; SI-NEXT: s_add_i32 s22, s22, 3
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: s_and_b32 s7, s22, 0xff
-; SI-NEXT: s_addk_i32 s5, 0x300
-; SI-NEXT: s_lshl_b32 s6, s23, 24
-; SI-NEXT: s_lshl_b32 s7, s7, 16
+; SI-NEXT: s_or_b32 s5, s5, s6
+; SI-NEXT: s_and_b32 s6, s22, 0xff
+; SI-NEXT: s_lshl_b32 s6, s6, 16
+; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: s_add_i32 s24, s24, 3
-; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s24, 0xff
; SI-NEXT: s_lshl_b32 s7, s25, 8
-; SI-NEXT: s_add_i32 s26, s26, 3
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_and_b32 s8, s26, 0xff
-; SI-NEXT: s_addk_i32 s6, 0x300
-; SI-NEXT: s_lshl_b32 s7, s27, 24
-; SI-NEXT: s_lshl_b32 s8, s8, 16
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_and_b32 s7, s26, 0xff
+; SI-NEXT: s_lshl_b32 s7, s7, 16
+; SI-NEXT: s_lshl_b32 s8, s27, 24
; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s7, s7, s8
+; SI-NEXT: s_or_b32 s7, s8, s7
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_cbranch_execnz .LBB73_3
+; SI-NEXT: .LBB73_2: ; %cmp.true
; SI-NEXT: s_add_i32 s28, s28, 3
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_and_b32 s7, s28, 0xff
-; SI-NEXT: s_lshl_b32 s8, s29, 8
+; SI-NEXT: s_and_b32 s4, s28, 0xff
+; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28
-; SI-NEXT: s_or_b32 s7, s8, s7
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v27
+; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_addk_i32 s7, 0x300
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v29
+; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_and_b32 s7, s7, 0xffff
+; SI-NEXT: v_or_b32_e32 v1, v50, v1
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v0, v51, v0
-; SI-NEXT: v_or_b32_e32 v0, s7, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v0, s4, v0
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT: v_or_b32_e32 v2, v49, v2
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v27
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v29
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v49, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30
+; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v31
; SI-NEXT: v_or_b32_e32 v0, v48, v0
@@ -29656,30 +29903,66 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16
; SI-NEXT: v_or_b32_e32 v0, v36, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: s_add_i32 s16, s16, 3
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_and_b32 s4, s16, 0xff
+; SI-NEXT: s_lshl_b32 s5, s17, 8
+; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v35, v1
+; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_and_b32 s6, s18, 0xff
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_addk_i32 s4, 0x300
+; SI-NEXT: s_lshl_b32 s5, s19, 24
+; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
+; SI-NEXT: s_or_b32 s5, s5, s6
+; SI-NEXT: s_add_i32 s20, s20, 3
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20
+; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_and_b32 s5, s20, 0xff
+; SI-NEXT: s_lshl_b32 s6, s21, 8
+; SI-NEXT: s_add_i32 s22, s22, 3
; SI-NEXT: v_or_b32_e32 v0, v26, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_and_b32 s7, s22, 0xff
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_addk_i32 s5, 0x300
+; SI-NEXT: s_lshl_b32 s6, s23, 24
+; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v21, v1
+; SI-NEXT: s_and_b32 s5, s5, 0xffff
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_add_i32 s24, s24, 3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_and_b32 s6, s24, 0xff
+; SI-NEXT: s_lshl_b32 s7, s25, 8
+; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_and_b32 s8, s26, 0xff
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v24
+; SI-NEXT: s_addk_i32 s6, 0x300
+; SI-NEXT: s_lshl_b32 s7, s27, 24
+; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: v_or_b32_e32 v0, v19, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: s_and_b32 s6, s6, 0xffff
+; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_or_b32 s6, s7, s6
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v17, v1
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
@@ -29694,12 +29977,15 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB73_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB73_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB73_2
+; SI-NEXT: s_branch .LBB73_3
;
; VI-LABEL: bitcast_v40i8_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v34, v14
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v32, v10
@@ -29708,7 +29994,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v29, v4
; VI-NEXT: v_mov_b32_e32 v27, v2
; VI-NEXT: v_mov_b32_e32 v28, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v5
@@ -29724,6 +30010,30 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v25
; VI-NEXT: s_cbranch_scc0 .LBB73_4
; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s28, 0xff
+; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: v_or_b32_sdwa v2, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v3, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -29751,119 +30061,95 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s6, s6, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s6, s7
-; VI-NEXT: s_and_b32 s7, s28, 0xff
-; VI-NEXT: s_lshl_b32 s8, s29, 8
-; VI-NEXT: s_or_b32 s7, s7, s8
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_e32 v3, s7, v0
-; VI-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_cbranch_execnz .LBB73_3
; VI-NEXT: .LBB73_2: ; %cmp.true
+; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: s_and_b32 s4, s28, 0xff
+; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: s_or_b32 s4, s5, s4
+; VI-NEXT: s_addk_i32 s4, 0x300
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v28
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v27
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v29
+; VI-NEXT: v_or_b32_e32 v0, s4, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
+; VI-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
+; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1
+; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v31
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
; VI-NEXT: s_add_i32 s16, s16, 3
+; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
; VI-NEXT: s_add_i32 s18, s18, 3
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: s_or_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
; VI-NEXT: s_and_b32 s4, s4, 0xffff
; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_add_i32 s20, s20, 3
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
; VI-NEXT: s_lshl_b32 s6, s21, 8
; VI-NEXT: s_add_i32 s22, s22, 3
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_addk_i32 s5, 0x300
; VI-NEXT: s_or_b32 s6, s7, s6
+; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_add_i32 s24, s24, 3
+; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
; VI-NEXT: s_lshl_b32 s7, s25, 8
; VI-NEXT: s_add_i32 s26, s26, 3
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_addk_i32 s6, 0x300
; VI-NEXT: s_or_b32 s7, s8, s7
-; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s7, 16
-; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: s_or_b32 s6, s7, s6
-; VI-NEXT: s_and_b32 s7, s28, 0xff
-; VI-NEXT: s_lshl_b32 s8, s29, 8
-; VI-NEXT: s_or_b32 s7, s8, s7
-; VI-NEXT: s_addk_i32 s7, 0x300
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v28
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_e32 v0, s7, v0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v27
-; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
-; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v31
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18
-; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v22
+; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v24
+; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
@@ -29878,12 +30164,15 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB73_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB73_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB73_2
+; VI-NEXT: s_branch .LBB73_3
;
; GFX9-LABEL: bitcast_v40i8_to_v5f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v34, v14
; GFX9-NEXT: v_mov_b32_e32 v33, v12
; GFX9-NEXT: v_mov_b32_e32 v32, v10
@@ -29892,7 +30181,7 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v29, v4
; GFX9-NEXT: v_mov_b32_e32 v27, v2
; GFX9-NEXT: v_mov_b32_e32 v28, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v50, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v5
@@ -29908,6 +30197,30 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25
; GFX9-NEXT: s_cbranch_scc0 .LBB73_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_and_b32 s4, s28, 0xff
+; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_or_b32 s4, s4, s5
+; GFX9-NEXT: v_or_b32_sdwa v2, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v3, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s17, 8
; GFX9-NEXT: s_or_b32 s4, s4, s5
@@ -29935,135 +30248,113 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s28, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s29, 8
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_e32 v3, s7, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_cbranch_execnz .LBB73_3
; GFX9-NEXT: .LBB73_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s28, s28, 3
+; GFX9-NEXT: s_and_b32 s5, s28, 0xff
+; GFX9-NEXT: s_lshl_b32 s6, s29, 8
+; GFX9-NEXT: s_or_b32 s5, s6, s5
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v28
+; GFX9-NEXT: s_movk_i32 s4, 0x300
+; GFX9-NEXT: s_addk_i32 s5, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v27
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v29
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_add_i32 s16, s16, 3
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v3, s5, v0
+; GFX9-NEXT: s_and_b32 s5, s16, 0xff
+; GFX9-NEXT: s_lshl_b32 s6, s17, 8
; GFX9-NEXT: s_add_i32 s18, s18, 3
-; GFX9-NEXT: s_and_b32 s4, s16, 0xff
-; GFX9-NEXT: s_lshl_b32 s5, s17, 8
+; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: s_or_b32 s5, s6, s5
; GFX9-NEXT: s_and_b32 s6, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s19, 8
-; GFX9-NEXT: s_or_b32 s4, s5, s4
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v31
; GFX9-NEXT: s_or_b32 s6, s7, s6
-; GFX9-NEXT: s_addk_i32 s4, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_addk_i32 s5, 0x300
; GFX9-NEXT: s_addk_i32 s6, 0x300
-; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
; GFX9-NEXT: s_lshl_b32 s6, s6, 16
; GFX9-NEXT: s_add_i32 s20, s20, 3
-; GFX9-NEXT: s_or_b32 s4, s4, s6
+; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
+; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: s_and_b32 s6, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s21, 8
; GFX9-NEXT: s_add_i32 s22, s22, 3
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_or_b32 s6, s7, s6
; GFX9-NEXT: s_and_b32 s7, s22, 0xff
; GFX9-NEXT: s_lshl_b32 s8, s23, 8
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s7, s8, s7
+; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v16
; GFX9-NEXT: s_addk_i32 s6, 0x300
; GFX9-NEXT: s_addk_i32 s7, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
; GFX9-NEXT: s_add_i32 s24, s24, 3
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s24, 0xff
; GFX9-NEXT: s_lshl_b32 s8, s25, 8
; GFX9-NEXT: s_add_i32 s26, s26, 3
-; GFX9-NEXT: s_or_b32 s7, s8, s7
-; GFX9-NEXT: s_and_b32 s8, s26, 0xff
-; GFX9-NEXT: s_lshl_b32 s9, s27, 8
-; GFX9-NEXT: s_or_b32 s8, s9, s8
-; GFX9-NEXT: s_addk_i32 s7, 0x300
-; GFX9-NEXT: s_addk_i32 s8, 0x300
-; GFX9-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX9-NEXT: s_lshl_b32 s8, s8, 16
-; GFX9-NEXT: s_add_i32 s28, s28, 3
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: s_and_b32 s8, s28, 0xff
-; GFX9-NEXT: s_lshl_b32 s9, s29, 8
-; GFX9-NEXT: s_or_b32 s8, s9, s8
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v28
-; GFX9-NEXT: s_movk_i32 s5, 0x300
-; GFX9-NEXT: s_addk_i32 s8, 0x300
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX9-NEXT: v_add_u32_sdwa v0, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_e32 v3, s8, v0
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v27
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v31
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
-; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v16
-; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v18
; GFX9-NEXT: v_add_u32_e32 v1, 3, v20
+; GFX9-NEXT: s_or_b32 s7, s8, s7
+; GFX9-NEXT: s_and_b32 s8, s26, 0xff
+; GFX9-NEXT: s_lshl_b32 s9, s27, 8
; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_or_b32 s8, s9, s8
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: s_addk_i32 s7, 0x300
+; GFX9-NEXT: s_addk_i32 s8, 0x300
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v22
; GFX9-NEXT: v_add_u32_e32 v1, 3, v24
+; GFX9-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX9-NEXT: s_lshl_b32 s8, s8, 16
; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: .LBB73_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB73_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB73_2
+; GFX9-NEXT: s_branch .LBB73_3
;
; GFX11-LABEL: bitcast_v40i8_to_v5f64_scalar:
; GFX11: ; %bb.0:
@@ -30084,64 +30375,64 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB73_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
-; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v23
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v27
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_or_b32_e32 v0, v0, v37
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v28
; GFX11-NEXT: v_or_b32_e32 v2, v2, v34
; GFX11-NEXT: v_or_b32_e32 v3, v3, v35
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v30
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-NEXT: v_or_b32_e32 v5, v5, v36
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_or_b32_e32 v6, v6, v31
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v24
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v29
; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v16
@@ -30149,8 +30440,8 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v20
; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v6
; GFX11-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-NEXT: v_mov_b32_e32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v25
; GFX11-NEXT: v_or_b32_e32 v1, v1, v33
; GFX11-NEXT: v_or_b32_e32 v5, v5, v22
@@ -30167,11 +30458,10 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: v_or_b32_e32 v7, v7, v11
; GFX11-NEXT: v_or_b32_e32 v8, v12, v8
; GFX11-NEXT: v_or_b32_e32 v5, v0, v1
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: v_or_b32_e32 v9, v9, v10
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB73_3
+; GFX11-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: s_cbranch_execnz .LBB73_3
; GFX11-NEXT: .LBB73_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -30299,7 +30589,9 @@ define inreg <5 x double> @bitcast_v40i8_to_v5f64_scalar(<40 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB73_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB73_2
+; GFX11-NEXT: s_branch .LBB73_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31313,6 +31605,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB75_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s24
@@ -31424,7 +31717,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB75_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB75_2
; SI-NEXT: .LBB75_4:
; SI-NEXT: v_mov_b32_e32 v1, s24
; SI-NEXT: v_mov_b32_e32 v3, s22
@@ -31558,6 +31852,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
+; VI-NEXT: s_mov_b64 s[14:15], -1
; VI-NEXT: s_cbranch_scc0 .LBB75_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
@@ -31664,7 +31959,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr28
; VI-NEXT: ; implicit-def: $sgpr27
; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB75_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; VI-NEXT: s_cbranch_vccz .LBB75_2
; VI-NEXT: .LBB75_4:
; VI-NEXT: v_mov_b32_e32 v9, s16
; VI-NEXT: v_mov_b32_e32 v7, s18
@@ -31788,6 +32084,7 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_mov_b64 s[14:15], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
@@ -31894,7 +32191,8 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr28
; GFX9-NEXT: ; implicit-def: $sgpr27
; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB75_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GFX9-NEXT: s_cbranch_vccz .LBB75_2
; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v9, s16
; GFX9-NEXT: v_mov_b32_e32 v7, s18
@@ -32009,41 +32307,40 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
-; GFX11-NEXT: s_mov_b32 s45, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s14, s21, 24
; GFX11-NEXT: s_lshr_b32 s15, s21, 16
; GFX11-NEXT: s_lshr_b32 s22, s21, 8
-; GFX11-NEXT: s_lshr_b32 s47, s20, 16
-; GFX11-NEXT: s_lshr_b32 s46, s20, 8
+; GFX11-NEXT: s_lshr_b32 s46, s20, 16
+; GFX11-NEXT: s_lshr_b32 s45, s20, 8
; GFX11-NEXT: s_lshr_b32 s23, s19, 24
; GFX11-NEXT: s_lshr_b32 s24, s19, 16
; GFX11-NEXT: s_lshr_b32 s25, s19, 8
-; GFX11-NEXT: s_lshr_b32 s57, s18, 16
-; GFX11-NEXT: s_lshr_b32 s56, s18, 8
+; GFX11-NEXT: s_lshr_b32 s56, s18, 16
+; GFX11-NEXT: s_lshr_b32 s47, s18, 8
; GFX11-NEXT: s_lshr_b32 s26, s17, 24
; GFX11-NEXT: s_lshr_b32 s27, s17, 16
; GFX11-NEXT: s_lshr_b32 s28, s17, 8
-; GFX11-NEXT: s_lshr_b32 s59, s16, 16
-; GFX11-NEXT: s_lshr_b32 s58, s16, 8
+; GFX11-NEXT: s_lshr_b32 s58, s16, 16
+; GFX11-NEXT: s_lshr_b32 s57, s16, 8
; GFX11-NEXT: s_lshr_b32 s29, s3, 24
; GFX11-NEXT: s_lshr_b32 s40, s3, 16
; GFX11-NEXT: s_lshr_b32 s41, s3, 8
-; GFX11-NEXT: s_lshr_b32 s61, s2, 16
-; GFX11-NEXT: s_lshr_b32 s60, s2, 8
+; GFX11-NEXT: s_lshr_b32 s60, s2, 16
+; GFX11-NEXT: s_lshr_b32 s59, s2, 8
; GFX11-NEXT: s_lshr_b32 s42, s1, 24
; GFX11-NEXT: s_lshr_b32 s43, s1, 16
; GFX11-NEXT: s_lshr_b32 s44, s1, 8
-; GFX11-NEXT: s_lshr_b32 s63, s0, 16
-; GFX11-NEXT: s_lshr_b32 s62, s0, 8
+; GFX11-NEXT: s_lshr_b32 s62, s0, 16
+; GFX11-NEXT: s_lshr_b32 s61, s0, 8
; GFX11-NEXT: s_lshr_b64 s[12:13], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s45
-; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX11-NEXT: s_cbranch_execnz .LBB75_4
; GFX11-NEXT: .LBB75_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[2:3], 1.0
@@ -32087,48 +32384,49 @@ define inreg <40 x i8> @bitcast_v5f64_to_v40i8_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s44, s1, 8
; GFX11-NEXT: s_branch .LBB75_5
; GFX11-NEXT: .LBB75_3:
+; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: ; implicit-def: $sgpr63
; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr29
+; GFX11-NEXT: ; implicit-def: $sgpr57
; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr27
; GFX11-NEXT: ; implicit-def: $sgpr26
+; GFX11-NEXT: ; implicit-def: $sgpr47
; GFX11-NEXT: ; implicit-def: $sgpr56
-; GFX11-NEXT: ; implicit-def: $sgpr57
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr25
; GFX11-NEXT: ; implicit-def: $sgpr24
; GFX11-NEXT: ; implicit-def: $sgpr23
+; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr47
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr22
; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB75_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB75_2
; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v12, s0 :: v_dual_mov_b32 v7, s16
; GFX11-NEXT: v_dual_mov_b32 v10, s2 :: v_dual_mov_b32 v1, s20
; GFX11-NEXT: v_dual_mov_b32 v4, s18 :: v_dual_mov_b32 v15, s4
; GFX11-NEXT: v_dual_mov_b32 v14, s6 :: v_dual_mov_b32 v5, s10
-; GFX11-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v19, s63
-; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v17, s61
-; GFX11-NEXT: v_dual_mov_b32 v20, s62 :: v_dual_mov_b32 v13, s59
-; GFX11-NEXT: v_dual_mov_b32 v18, s60 :: v_dual_mov_b32 v9, s57
-; GFX11-NEXT: v_dual_mov_b32 v16, s58 :: v_dual_mov_b32 v11, s56
-; GFX11-NEXT: v_dual_mov_b32 v3, s47 :: v_dual_mov_b32 v6, s46
+; GFX11-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v19, s62
+; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v17, s60
+; GFX11-NEXT: v_dual_mov_b32 v20, s61 :: v_dual_mov_b32 v13, s58
+; GFX11-NEXT: v_dual_mov_b32 v18, s59 :: v_dual_mov_b32 v9, s56
+; GFX11-NEXT: v_dual_mov_b32 v16, s57 :: v_dual_mov_b32 v11, s47
+; GFX11-NEXT: v_dual_mov_b32 v3, s46 :: v_dual_mov_b32 v6, s45
; GFX11-NEXT: .LBB75_5: ; %end
; GFX11-NEXT: s_and_b32 s0, s1, 0xff
; GFX11-NEXT: s_lshl_b32 s1, s44, 8
@@ -33615,6 +33913,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v34, v14
; SI-NEXT: v_mov_b32_e32 v33, v12
; SI-NEXT: v_mov_b32_e32 v32, v10
@@ -33623,7 +33922,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v29, v4
; SI-NEXT: v_mov_b32_e32 v27, v2
; SI-NEXT: v_mov_b32_e32 v28, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v51, 24, v1
; SI-NEXT: v_lshlrev_b32_e32 v50, 8, v3
; SI-NEXT: v_lshlrev_b32_e32 v49, 24, v5
@@ -33639,41 +33938,6 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: v_lshlrev_b32_e32 v17, 24, v25
; SI-NEXT: s_cbranch_scc0 .LBB77_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_and_b32 s4, s16, 0xff
-; SI-NEXT: s_lshl_b32 s5, s17, 8
-; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: s_and_b32 s5, s18, 0xff
-; SI-NEXT: s_lshl_b32 s5, s5, 16
-; SI-NEXT: s_lshl_b32 s6, s19, 24
-; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: s_or_b32 s4, s4, s5
-; SI-NEXT: s_and_b32 s5, s20, 0xff
-; SI-NEXT: s_lshl_b32 s6, s21, 8
-; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_and_b32 s6, s22, 0xff
-; SI-NEXT: s_lshl_b32 s6, s6, 16
-; SI-NEXT: s_lshl_b32 s7, s23, 24
-; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_and_b32 s6, s24, 0xff
-; SI-NEXT: s_lshl_b32 s7, s25, 8
-; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: s_and_b32 s7, s26, 0xff
-; SI-NEXT: s_lshl_b32 s7, s7, 16
-; SI-NEXT: s_lshl_b32 s8, s27, 24
-; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s7, s8, s7
-; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: s_and_b32 s7, s28, 0xff
-; SI-NEXT: s_lshl_b32 s8, s29, 8
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v28
-; SI-NEXT: s_or_b32 s7, s7, s8
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_and_b32 s7, s7, 0xffff
-; SI-NEXT: v_or_b32_e32 v0, v51, v0
-; SI-NEXT: v_or_b32_e32 v3, s7, v0
; SI-NEXT: v_and_b32_e32 v0, 0xff, v27
; SI-NEXT: v_and_b32_e32 v1, 0xff, v29
; SI-NEXT: v_or_b32_e32 v0, v0, v50
@@ -33681,13 +33945,6 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v49, v1
; SI-NEXT: v_or_b32_e32 v4, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v30
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v31
-; SI-NEXT: v_or_b32_e32 v0, v0, v48
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v39, v1
-; SI-NEXT: v_or_b32_e32 v5, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
; SI-NEXT: v_or_b32_e32 v0, v0, v38
@@ -33715,72 +33972,78 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v17, v1
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v30
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v31
; SI-NEXT: v_or_b32_e32 v9, v0, v1
-; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_mov_b32_e32 v2, s6
-; SI-NEXT: s_cbranch_execnz .LBB77_3
-; SI-NEXT: .LBB77_2: ; %cmp.true
-; SI-NEXT: s_add_i32 s16, s16, 3
+; SI-NEXT: s_and_b32 s4, s28, 0xff
+; SI-NEXT: s_lshl_b32 s5, s29, 8
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v28
+; SI-NEXT: v_or_b32_e32 v2, v2, v48
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
+; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
+; SI-NEXT: v_or_b32_e32 v3, v39, v3
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
+; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_or_b32_e32 v5, v2, v3
+; SI-NEXT: v_or_b32_e32 v3, s4, v0
; SI-NEXT: s_and_b32 s4, s16, 0xff
; SI-NEXT: s_lshl_b32 s5, s17, 8
-; SI-NEXT: s_add_i32 s18, s18, 3
-; SI-NEXT: s_or_b32 s4, s5, s4
-; SI-NEXT: s_and_b32 s6, s18, 0xff
-; SI-NEXT: s_addk_i32 s4, 0x300
-; SI-NEXT: s_lshl_b32 s5, s19, 24
-; SI-NEXT: s_lshl_b32 s6, s6, 16
+; SI-NEXT: s_or_b32 s4, s4, s5
+; SI-NEXT: s_and_b32 s5, s18, 0xff
+; SI-NEXT: s_lshl_b32 s5, s5, 16
+; SI-NEXT: s_lshl_b32 s6, s19, 24
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: s_add_i32 s20, s20, 3
-; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s20, 0xff
; SI-NEXT: s_lshl_b32 s6, s21, 8
-; SI-NEXT: s_add_i32 s22, s22, 3
-; SI-NEXT: s_or_b32 s5, s6, s5
-; SI-NEXT: s_and_b32 s7, s22, 0xff
-; SI-NEXT: s_addk_i32 s5, 0x300
-; SI-NEXT: s_lshl_b32 s6, s23, 24
-; SI-NEXT: s_lshl_b32 s7, s7, 16
+; SI-NEXT: s_or_b32 s5, s5, s6
+; SI-NEXT: s_and_b32 s6, s22, 0xff
+; SI-NEXT: s_lshl_b32 s6, s6, 16
+; SI-NEXT: s_lshl_b32 s7, s23, 24
; SI-NEXT: s_and_b32 s5, s5, 0xffff
-; SI-NEXT: s_or_b32 s6, s6, s7
-; SI-NEXT: s_add_i32 s24, s24, 3
-; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s24, 0xff
; SI-NEXT: s_lshl_b32 s7, s25, 8
-; SI-NEXT: s_add_i32 s26, s26, 3
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_and_b32 s8, s26, 0xff
-; SI-NEXT: s_addk_i32 s6, 0x300
-; SI-NEXT: s_lshl_b32 s7, s27, 24
-; SI-NEXT: s_lshl_b32 s8, s8, 16
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_and_b32 s7, s26, 0xff
+; SI-NEXT: s_lshl_b32 s7, s7, 16
+; SI-NEXT: s_lshl_b32 s8, s27, 24
; SI-NEXT: s_and_b32 s6, s6, 0xffff
-; SI-NEXT: s_or_b32 s7, s7, s8
+; SI-NEXT: s_or_b32 s7, s8, s7
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: v_mov_b32_e32 v0, s4
+; SI-NEXT: v_mov_b32_e32 v1, s5
+; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_cbranch_execnz .LBB77_3
+; SI-NEXT: .LBB77_2: ; %cmp.true
; SI-NEXT: s_add_i32 s28, s28, 3
-; SI-NEXT: s_or_b32 s6, s7, s6
-; SI-NEXT: s_and_b32 s7, s28, 0xff
-; SI-NEXT: s_lshl_b32 s8, s29, 8
+; SI-NEXT: s_and_b32 s4, s28, 0xff
+; SI-NEXT: s_lshl_b32 s5, s29, 8
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v28
-; SI-NEXT: s_or_b32 s7, s8, s7
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v27
+; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: s_addk_i32 s7, 0x300
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v29
+; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: s_and_b32 s7, s7, 0xffff
+; SI-NEXT: v_or_b32_e32 v1, v50, v1
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v0, v51, v0
-; SI-NEXT: v_or_b32_e32 v0, s7, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v0, s4, v0
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT: v_or_b32_e32 v2, v49, v2
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_add_i32_e32 v3, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v27
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v29
-; SI-NEXT: v_or_b32_e32 v0, v50, v0
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v49, v1
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
-; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30
+; SI-NEXT: v_add_i32_e32 v4, vcc, 0x3000000, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v31
; SI-NEXT: v_or_b32_e32 v0, v48, v0
@@ -33807,30 +34070,66 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v16
; SI-NEXT: v_or_b32_e32 v0, v36, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: s_add_i32 s16, s16, 3
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_and_b32 s4, s16, 0xff
+; SI-NEXT: s_lshl_b32 s5, s17, 8
+; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v35, v1
+; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_and_b32 s6, s18, 0xff
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_addk_i32 s4, 0x300
+; SI-NEXT: s_lshl_b32 s5, s19, 24
+; SI-NEXT: s_lshl_b32 s6, s6, 16
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18
+; SI-NEXT: s_and_b32 s4, s4, 0xffff
+; SI-NEXT: s_or_b32 s5, s5, s6
+; SI-NEXT: s_add_i32 s20, s20, 3
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v20
+; SI-NEXT: s_or_b32 s4, s5, s4
+; SI-NEXT: s_and_b32 s5, s20, 0xff
+; SI-NEXT: s_lshl_b32 s6, s21, 8
+; SI-NEXT: s_add_i32 s22, s22, 3
; SI-NEXT: v_or_b32_e32 v0, v26, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_and_b32 s7, s22, 0xff
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_addk_i32 s5, 0x300
+; SI-NEXT: s_lshl_b32 s6, s23, 24
+; SI-NEXT: s_lshl_b32 s7, s7, 16
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v21, v1
+; SI-NEXT: s_and_b32 s5, s5, 0xffff
+; SI-NEXT: s_or_b32 s6, s6, s7
+; SI-NEXT: s_add_i32 s24, s24, 3
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: s_or_b32 s5, s6, s5
+; SI-NEXT: s_and_b32 s6, s24, 0xff
+; SI-NEXT: s_lshl_b32 s7, s25, 8
+; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v22
+; SI-NEXT: s_or_b32 s6, s7, s6
+; SI-NEXT: s_and_b32 s8, s26, 0xff
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v24
+; SI-NEXT: s_addk_i32 s6, 0x300
+; SI-NEXT: s_lshl_b32 s7, s27, 24
+; SI-NEXT: s_lshl_b32 s8, s8, 16
; SI-NEXT: v_or_b32_e32 v0, v19, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: s_and_b32 s6, s6, 0xffff
+; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: s_or_b32 s6, s7, s6
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v17, v1
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
@@ -33845,12 +34144,15 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB77_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB77_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB77_2
+; SI-NEXT: s_branch .LBB77_3
;
; VI-LABEL: bitcast_v40i8_to_v5i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v34, v14
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v32, v10
@@ -33859,7 +34161,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v29, v4
; VI-NEXT: v_mov_b32_e32 v27, v2
; VI-NEXT: v_mov_b32_e32 v28, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v49, 8, v5
@@ -33875,6 +34177,30 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v25
; VI-NEXT: s_cbranch_scc0 .LBB77_4
; VI-NEXT: ; %bb.1: ; %cmp.false
+; VI-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s28, 0xff
+; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: s_or_b32 s4, s4, s5
+; VI-NEXT: v_or_b32_sdwa v2, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v3, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_e32 v3, s4, v0
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -33902,119 +34228,95 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; VI-NEXT: s_and_b32 s6, s6, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_or_b32 s6, s6, s7
-; VI-NEXT: s_and_b32 s7, s28, 0xff
-; VI-NEXT: s_lshl_b32 s8, s29, 8
-; VI-NEXT: s_or_b32 s7, s7, s8
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_e32 v3, s7, v0
-; VI-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s6
; VI-NEXT: s_cbranch_execnz .LBB77_3
; VI-NEXT: .LBB77_2: ; %cmp.true
+; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: s_and_b32 s4, s28, 0xff
+; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: s_or_b32 s4, s5, s4
+; VI-NEXT: s_addk_i32 s4, 0x300
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v28
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v27
+; VI-NEXT: s_and_b32 s4, s4, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v29
+; VI-NEXT: v_or_b32_e32 v0, s4, v0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
+; VI-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
+; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v1
+; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v31
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
; VI-NEXT: s_add_i32 s16, s16, 3
+; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
; VI-NEXT: s_add_i32 s18, s18, 3
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s18, 0xff
; VI-NEXT: s_lshl_b32 s6, s19, 8
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: s_or_b32 s5, s6, s5
+; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
; VI-NEXT: s_and_b32 s4, s4, 0xffff
; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_add_i32 s20, s20, 3
+; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s20, 0xff
; VI-NEXT: s_lshl_b32 s6, s21, 8
; VI-NEXT: s_add_i32 s22, s22, 3
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s22, 0xff
; VI-NEXT: s_lshl_b32 s7, s23, 8
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_addk_i32 s5, 0x300
; VI-NEXT: s_or_b32 s6, s7, s6
+; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
; VI-NEXT: s_add_i32 s24, s24, 3
+; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20
; VI-NEXT: s_or_b32 s5, s6, s5
; VI-NEXT: s_and_b32 s6, s24, 0xff
; VI-NEXT: s_lshl_b32 s7, s25, 8
; VI-NEXT: s_add_i32 s26, s26, 3
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
+; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: s_and_b32 s7, s26, 0xff
; VI-NEXT: s_lshl_b32 s8, s27, 8
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_addk_i32 s6, 0x300
; VI-NEXT: s_or_b32 s7, s8, s7
-; VI-NEXT: s_and_b32 s6, s6, 0xffff
-; VI-NEXT: s_lshl_b32 s7, s7, 16
-; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: s_or_b32 s6, s7, s6
-; VI-NEXT: s_and_b32 s7, s28, 0xff
-; VI-NEXT: s_lshl_b32 s8, s29, 8
-; VI-NEXT: s_or_b32 s7, s8, s7
-; VI-NEXT: s_addk_i32 s7, 0x300
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v28
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_e32 v0, s7, v0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v27
-; VI-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v4, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
-; VI-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v31
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v33
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v16
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v18
-; VI-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v20
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v22
+; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v24
+; VI-NEXT: s_or_b32 s6, s7, s6
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_add_i32 s4, s4, 0x3000000
@@ -34029,12 +34331,15 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB77_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB77_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB77_2
+; VI-NEXT: s_branch .LBB77_3
;
; GFX9-LABEL: bitcast_v40i8_to_v5i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v34, v14
; GFX9-NEXT: v_mov_b32_e32 v33, v12
; GFX9-NEXT: v_mov_b32_e32 v32, v10
@@ -34043,7 +34348,7 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v29, v4
; GFX9-NEXT: v_mov_b32_e32 v27, v2
; GFX9-NEXT: v_mov_b32_e32 v28, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v51, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v50, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v49, 8, v5
@@ -34059,6 +34364,30 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX9-NEXT: v_lshlrev_b32_e32 v17, 8, v25
; GFX9-NEXT: s_cbranch_scc0 .LBB77_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
+; GFX9-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_and_b32 s4, s28, 0xff
+; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: s_or_b32 s4, s4, s5
+; GFX9-NEXT: v_or_b32_sdwa v2, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v3, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX9-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s17, 8
; GFX9-NEXT: s_or_b32 s4, s4, s5
@@ -34086,135 +34415,113 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
; GFX9-NEXT: s_or_b32 s6, s6, s7
-; GFX9-NEXT: s_and_b32 s7, s28, 0xff
-; GFX9-NEXT: s_lshl_b32 s8, s29, 8
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v28, v51 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_e32 v3, s7, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v27, v50 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v29, v49 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v30, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v31, v39 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v37 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v34, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v16, v35 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v18, v26 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v24, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_cbranch_execnz .LBB77_3
; GFX9-NEXT: .LBB77_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s28, s28, 3
+; GFX9-NEXT: s_and_b32 s5, s28, 0xff
+; GFX9-NEXT: s_lshl_b32 s6, s29, 8
+; GFX9-NEXT: s_or_b32 s5, s6, s5
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v28
+; GFX9-NEXT: s_movk_i32 s4, 0x300
+; GFX9-NEXT: s_addk_i32 s5, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v27
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v29
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
+; GFX9-NEXT: v_add_u32_sdwa v0, v0, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v50, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v2, v49, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_add_i32 s16, s16, 3
+; GFX9-NEXT: v_add_u32_e32 v1, 0x300, v1
+; GFX9-NEXT: v_add_u32_sdwa v2, v2, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_e32 v3, s5, v0
+; GFX9-NEXT: s_and_b32 s5, s16, 0xff
+; GFX9-NEXT: s_lshl_b32 s6, s17, 8
; GFX9-NEXT: s_add_i32 s18, s18, 3
-; GFX9-NEXT: s_and_b32 s4, s16, 0xff
-; GFX9-NEXT: s_lshl_b32 s5, s17, 8
+; GFX9-NEXT: v_or_b32_sdwa v4, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: s_or_b32 s5, s6, s5
; GFX9-NEXT: s_and_b32 s6, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s19, 8
-; GFX9-NEXT: s_or_b32 s4, s5, s4
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v31
; GFX9-NEXT: s_or_b32 s6, s7, s6
-; GFX9-NEXT: s_addk_i32 s4, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_addk_i32 s5, 0x300
; GFX9-NEXT: s_addk_i32 s6, 0x300
-; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
; GFX9-NEXT: s_lshl_b32 s6, s6, 16
; GFX9-NEXT: s_add_i32 s20, s20, 3
-; GFX9-NEXT: s_or_b32 s4, s4, s6
+; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
+; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: s_and_b32 s6, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s7, s21, 8
; GFX9-NEXT: s_add_i32 s22, s22, 3
+; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_or_b32 s6, s7, s6
; GFX9-NEXT: s_and_b32 s7, s22, 0xff
; GFX9-NEXT: s_lshl_b32 s8, s23, 8
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s7, s8, s7
+; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v16
; GFX9-NEXT: s_addk_i32 s6, 0x300
; GFX9-NEXT: s_addk_i32 s7, 0x300
+; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s6, s6, 0xffff
; GFX9-NEXT: s_lshl_b32 s7, s7, 16
; GFX9-NEXT: s_add_i32 s24, s24, 3
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s6, s6, s7
; GFX9-NEXT: s_and_b32 s7, s24, 0xff
; GFX9-NEXT: s_lshl_b32 s8, s25, 8
; GFX9-NEXT: s_add_i32 s26, s26, 3
-; GFX9-NEXT: s_or_b32 s7, s8, s7
-; GFX9-NEXT: s_and_b32 s8, s26, 0xff
-; GFX9-NEXT: s_lshl_b32 s9, s27, 8
-; GFX9-NEXT: s_or_b32 s8, s9, s8
-; GFX9-NEXT: s_addk_i32 s7, 0x300
-; GFX9-NEXT: s_addk_i32 s8, 0x300
-; GFX9-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX9-NEXT: s_lshl_b32 s8, s8, 16
-; GFX9-NEXT: s_add_i32 s28, s28, 3
-; GFX9-NEXT: s_or_b32 s7, s7, s8
-; GFX9-NEXT: s_and_b32 s8, s28, 0xff
-; GFX9-NEXT: s_lshl_b32 s9, s29, 8
-; GFX9-NEXT: s_or_b32 s8, s9, s8
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v28
-; GFX9-NEXT: s_movk_i32 s5, 0x300
-; GFX9-NEXT: s_addk_i32 s8, 0x300
-; GFX9-NEXT: v_or_b32_sdwa v0, v51, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX9-NEXT: v_add_u32_sdwa v0, v0, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_e32 v3, s8, v0
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v27
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
-; GFX9-NEXT: v_or_b32_sdwa v0, v50, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v31
-; GFX9-NEXT: v_or_b32_sdwa v0, v48, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v5, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v32
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v33
-; GFX9-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v34
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v16
-; GFX9-NEXT: v_or_b32_sdwa v0, v36, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v18
; GFX9-NEXT: v_add_u32_e32 v1, 3, v20
+; GFX9-NEXT: s_or_b32 s7, s8, s7
+; GFX9-NEXT: s_and_b32 s8, s26, 0xff
+; GFX9-NEXT: s_lshl_b32 s9, s27, 8
; GFX9-NEXT: v_or_b32_sdwa v0, v26, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_or_b32 s8, s9, s8
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: s_addk_i32 s7, 0x300
+; GFX9-NEXT: s_addk_i32 s8, 0x300
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v22
; GFX9-NEXT: v_add_u32_e32 v1, 3, v24
+; GFX9-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX9-NEXT: s_lshl_b32 s8, s8, 16
; GFX9-NEXT: v_or_b32_sdwa v0, v19, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v17, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
-; GFX9-NEXT: v_add_u32_sdwa v1, v1, s5 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-NEXT: v_mov_b32_e32 v0, s5
; GFX9-NEXT: v_mov_b32_e32 v1, s6
; GFX9-NEXT: v_mov_b32_e32 v2, s7
; GFX9-NEXT: .LBB77_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB77_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB77_2
+; GFX9-NEXT: s_branch .LBB77_3
;
; GFX11-LABEL: bitcast_v40i8_to_v5i64_scalar:
; GFX11: ; %bb.0:
@@ -34235,64 +34542,64 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX11-NEXT: v_lshlrev_b32_e32 v17, 8, v17
; GFX11-NEXT: v_lshlrev_b32_e32 v19, 8, v19
; GFX11-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB77_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v26
-; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
; GFX11-NEXT: v_and_b32_e32 v2, 0xff, v23
; GFX11-NEXT: v_and_b32_e32 v3, 0xff, v27
-; GFX11-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-NEXT: v_or_b32_e32 v0, v0, v37
-; GFX11-NEXT: s_or_b32 s8, s9, s10
-; GFX11-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-NEXT: s_or_b32 s7, s8, s9
+; GFX11-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v28
; GFX11-NEXT: v_or_b32_e32 v2, v2, v34
; GFX11-NEXT: v_or_b32_e32 v3, v3, v35
; GFX11-NEXT: v_and_b32_e32 v6, 0xff, v30
-; GFX11-NEXT: s_or_b32 s9, s9, s10
-; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
-; GFX11-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-NEXT: s_or_b32 s8, s8, s9
+; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s29, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
; GFX11-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-NEXT: v_or_b32_e32 v5, v5, v36
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_or_b32_e32 v6, v6, v31
-; GFX11-NEXT: s_or_b32 s10, s10, s11
+; GFX11-NEXT: s_or_b32 s9, s9, s10
; GFX11-NEXT: v_and_b32_e32 v1, 0xff, v24
-; GFX11-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-NEXT: v_and_b32_e32 v7, 0xffff, v5
; GFX11-NEXT: v_and_b32_e32 v5, 0xff, v29
; GFX11-NEXT: v_and_b32_e32 v8, 0xff, v16
@@ -34300,8 +34607,8 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX11-NEXT: v_and_b32_e32 v10, 0xff, v20
; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v6
; GFX11-NEXT: v_or_b32_e32 v6, v2, v3
-; GFX11-NEXT: v_mov_b32_e32 v3, s8
-; GFX11-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-NEXT: v_mov_b32_e32 v3, s7
+; GFX11-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xff, v25
; GFX11-NEXT: v_or_b32_e32 v1, v1, v33
; GFX11-NEXT: v_or_b32_e32 v5, v5, v22
@@ -34318,11 +34625,10 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX11-NEXT: v_or_b32_e32 v7, v7, v11
; GFX11-NEXT: v_or_b32_e32 v8, v12, v8
; GFX11-NEXT: v_or_b32_e32 v5, v0, v1
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: v_or_b32_e32 v9, v9, v10
-; GFX11-NEXT: v_dual_mov_b32 v1, s6 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_3
+; GFX11-NEXT: v_dual_mov_b32 v1, s5 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: s_cbranch_execnz .LBB77_3
; GFX11-NEXT: .LBB77_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -34450,7 +34756,9 @@ define inreg <5 x i64> @bitcast_v40i8_to_v5i64_scalar(<40 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-NEXT: s_branch .LBB77_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB77_2
+; GFX11-NEXT: s_branch .LBB77_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35495,6 +35803,7 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB79_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v3, s24
@@ -35732,12 +36041,15 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB79_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB79_2
+; SI-NEXT: s_branch .LBB79_3
;
; VI-LABEL: bitcast_v5i64_to_v40i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
+; VI-NEXT: s_mov_b64 s[14:15], -1
; VI-NEXT: s_cbranch_scc0 .LBB79_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s26, s25, 24
@@ -35965,12 +36277,15 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr28
; VI-NEXT: ; implicit-def: $sgpr27
; VI-NEXT: ; implicit-def: $sgpr26
-; VI-NEXT: s_branch .LBB79_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; VI-NEXT: s_cbranch_vccz .LBB79_2
+; VI-NEXT: s_branch .LBB79_3
;
; GFX9-LABEL: bitcast_v5i64_to_v40i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
+; GFX9-NEXT: s_mov_b64 s[14:15], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB79_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s26, s25, 24
@@ -36189,15 +36504,18 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr28
; GFX9-NEXT: ; implicit-def: $sgpr27
; GFX9-NEXT: ; implicit-def: $sgpr26
-; GFX9-NEXT: s_branch .LBB79_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GFX9-NEXT: s_cbranch_vccz .LBB79_2
+; GFX9-NEXT: s_branch .LBB79_3
;
; GFX11-LABEL: bitcast_v5i64_to_v40i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
-; GFX11-NEXT: s_mov_b32 s63, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB79_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[20:21], 24
; GFX11-NEXT: s_lshr_b32 s14, s21, 24
; GFX11-NEXT: s_lshr_b32 s15, s21, 16
; GFX11-NEXT: s_lshr_b32 s22, s21, 8
@@ -36223,13 +36541,11 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s60, s1, 8
; GFX11-NEXT: s_lshr_b32 s61, s0, 16
; GFX11-NEXT: s_lshr_b32 s62, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[12:13], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s63
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
+; GFX11-NEXT: s_cbranch_execnz .LBB79_3
; GFX11-NEXT: .LBB79_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
@@ -36404,7 +36720,9 @@ define inreg <40 x i8> @bitcast_v5i64_to_v40i8_scalar(<5 x i64> inreg %a, i32 in
; GFX11-NEXT: ; implicit-def: $sgpr22
; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB79_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB79_2
+; GFX11-NEXT: s_branch .LBB79_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -36517,18 +36835,20 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB81_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB81_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB81_4
-; SI-NEXT: .LBB81_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB81_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB81_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB81_3:
-; SI-NEXT: s_branch .LBB81_2
; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -36552,18 +36872,20 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
-; VI-NEXT: .LBB81_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB81_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB81_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -36587,18 +36909,20 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
-; GFX9-NEXT: .LBB81_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB81_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -36626,20 +36950,21 @@ define inreg <5 x i64> @bitcast_v5f64_to_v5i64_scalar(<5 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB81_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
-; GFX11-NEXT: .LBB81_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -36785,10 +37110,14 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s26, 0
-; SI-NEXT: s_cbranch_scc0 .LBB83_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB83_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB83_3
-; SI-NEXT: .LBB83_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB83_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB83_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
; SI-NEXT: s_add_u32 s18, s18, 3
@@ -36799,7 +37128,7 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s23, s23, 0
; SI-NEXT: s_add_u32 s24, s24, 3
; SI-NEXT: s_addc_u32 s25, s25, 0
-; SI-NEXT: .LBB83_3: ; %end
+; SI-NEXT: .LBB83_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -36811,17 +37140,19 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB83_4:
-; SI-NEXT: s_branch .LBB83_2
;
; VI-LABEL: bitcast_v5i64_to_v5f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s26, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_3
-; VI-NEXT: .LBB83_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB83_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB83_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
; VI-NEXT: s_add_u32 s18, s18, 3
@@ -36832,7 +37163,7 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s23, s23, 0
; VI-NEXT: s_add_u32 s24, s24, 3
; VI-NEXT: s_addc_u32 s25, s25, 0
-; VI-NEXT: .LBB83_3: ; %end
+; VI-NEXT: .LBB83_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -36844,17 +37175,19 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB83_4:
-; VI-NEXT: s_branch .LBB83_2
;
; GFX9-LABEL: bitcast_v5i64_to_v5f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s26, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_3
-; GFX9-NEXT: .LBB83_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB83_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
@@ -36865,7 +37198,7 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s23, s23, 0
; GFX9-NEXT: s_add_u32 s24, s24, 3
; GFX9-NEXT: s_addc_u32 s25, s25, 0
-; GFX9-NEXT: .LBB83_3: ; %end
+; GFX9-NEXT: .LBB83_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -36877,19 +37210,20 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB83_4:
-; GFX9-NEXT: s_branch .LBB83_2
;
; GFX11-LABEL: bitcast_v5i64_to_v5f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s22, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB83_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_3
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
@@ -36900,15 +37234,13 @@ define inreg <5 x double> @bitcast_v5i64_to_v5f64_scalar(<5 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s19, s19, 0
; GFX11-NEXT: s_add_u32 s20, s20, 3
; GFX11-NEXT: s_addc_u32 s21, s21, 0
-; GFX11-NEXT: .LBB83_3: ; %end
+; GFX11-NEXT: .LBB83_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v7, s19
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: s_branch .LBB83_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
index 48c9b87..f27b8e75 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.32bit.ll
@@ -81,62 +81,68 @@ define inreg float @bitcast_i32_to_f32_scalar(i32 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_i32_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_i32_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_i32_to_f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -232,14 +238,16 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -248,14 +256,16 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -264,14 +274,16 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -280,16 +292,17 @@ define inreg i32 @bitcast_f32_to_i32_scalar(float inreg %a, i32 inreg %b) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -396,6 +409,7 @@ define inreg <2 x i16> @bitcast_i32_to_v2i16_scalar(i32 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
@@ -409,53 +423,59 @@ define inreg <2 x i16> @bitcast_i32_to_v2i16_scalar(i32 inreg %a, i32 inreg %b)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB5_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_i32_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_i32_to_v2i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_i32_to_v2i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -572,6 +592,7 @@ define inreg i32 @bitcast_v2i16_to_i32_scalar(<2 x i16> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -589,39 +610,45 @@ define inreg i32 @bitcast_v2i16_to_i32_scalar(<2 x i16> inreg %a, i32 inreg %b)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v2i16_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v2i16_to_i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -630,16 +657,17 @@ define inreg i32 @bitcast_v2i16_to_i32_scalar(<2 x i16> inreg %a, i32 inreg %b)
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -753,6 +781,7 @@ define inreg <2 x half> @bitcast_i32_to_v2f16_scalar(i32 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s16, 16
@@ -769,53 +798,59 @@ define inreg <2 x half> @bitcast_i32_to_v2f16_scalar(i32 inreg %a, i32 inreg %b)
; SI-NEXT: .LBB9_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_i32_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_i32_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_i32_to_v2f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -939,6 +974,7 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; SI-NEXT: v_cvt_f16_f32_e32 v2, s17
; SI-NEXT: v_cvt_f16_f32_e32 v1, s16
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2
@@ -957,16 +993,22 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v2f16_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -974,8 +1016,6 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -984,15 +1024,17 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1001,16 +1043,17 @@ define inreg i32 @bitcast_v2f16_to_i32_scalar(<2 x half> inreg %a, i32 inreg %b)
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1122,6 +1165,7 @@ define inreg <2 x bfloat> @bitcast_i32_to_v2bf16_scalar(i32 inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s16, 0xffff0000
@@ -1138,53 +1182,59 @@ define inreg <2 x bfloat> @bitcast_i32_to_v2bf16_scalar(i32 inreg %a, i32 inreg
; SI-NEXT: .LBB13_4:
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_i32_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_i32_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_i32_to_v2bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB13_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: .LBB13_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1388,6 +1438,7 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; SI-NEXT: s_cmp_lg_u32 s18, 0
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
@@ -1404,16 +1455,22 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v2bf16_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_4
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -1434,8 +1491,6 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v1, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_3:
-; VI-NEXT: s_branch .LBB15_2
; VI-NEXT: .LBB15_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -1444,10 +1499,14 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -1470,8 +1529,6 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_and_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -1480,12 +1537,15 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
@@ -1511,8 +1571,6 @@ define inreg i32 @bitcast_v2bf16_to_i32_scalar(<2 x bfloat> inreg %a, i32 inreg
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1608,62 +1666,68 @@ define inreg <1 x i32> @bitcast_i32_to_v1i32_scalar(i32 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB17_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB17_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB17_3
-; SI-NEXT: .LBB17_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB17_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB17_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB17_3: ; %end
+; SI-NEXT: .LBB17_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB17_4:
-; SI-NEXT: s_branch .LBB17_2
;
; VI-LABEL: bitcast_i32_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_i32_to_v1i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_i32_to_v1i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB17_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: .LBB17_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1759,62 +1823,68 @@ define inreg i32 @bitcast_v1i32_to_i32_scalar(<1 x i32> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB19_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB19_3
-; SI-NEXT: .LBB19_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB19_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB19_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB19_3: ; %end
+; SI-NEXT: .LBB19_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB19_4:
-; SI-NEXT: s_branch .LBB19_2
;
; VI-LABEL: bitcast_v1i32_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_3
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB19_3: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-LABEL: bitcast_v1i32_to_i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_3
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB19_3: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_4:
-; GFX9-NEXT: s_branch .LBB19_2
;
; GFX11-LABEL: bitcast_v1i32_to_i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB19_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: .LBB19_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB19_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -2006,6 +2076,7 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 24
@@ -2027,12 +2098,15 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_i32_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB21_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s6, s16, 24
@@ -2054,12 +2128,15 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr7
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB21_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB21_2
+; VI-NEXT: s_branch .LBB21_3
;
; GFX9-LABEL: bitcast_i32_to_v4i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s6, s16, 24
@@ -2081,20 +2158,21 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr7
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB21_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB21_2
+; GFX9-NEXT: s_branch .LBB21_3
;
; GFX11-LABEL: bitcast_i32_to_v4i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s1, s0, 24
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
+; GFX11-NEXT: s_cbranch_execnz .LBB21_3
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -2110,7 +2188,9 @@ define inreg <4 x i8> @bitcast_i32_to_v4i8_scalar(i32 inreg %a, i32 inreg %b) {
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr1
-; GFX11-NEXT: s_branch .LBB21_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB21_2
+; GFX11-NEXT: s_branch .LBB21_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2384,6 +2464,7 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB23_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -2415,12 +2496,15 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB23_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB23_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB23_2
+; SI-NEXT: s_branch .LBB23_3
;
; VI-LABEL: bitcast_v4i8_to_i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB23_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -2452,12 +2536,15 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_4:
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB23_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB23_2
+; VI-NEXT: s_branch .LBB23_3
;
; GFX9-LABEL: bitcast_v4i8_to_i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -2489,27 +2576,28 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB23_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB23_2
+; GFX9-NEXT: s_branch .LBB23_3
;
; GFX11-LABEL: bitcast_v4i8_to_i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_cbranch_execnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -2524,14 +2612,16 @@ define inreg i32 @bitcast_v4i8_to_i32_scalar(<4 x i8> inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s0, s1
+; GFX11-NEXT: s_or_b32 s4, s0, s1
; GFX11-NEXT: .LBB23_3: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB23_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB23_2
+; GFX11-NEXT: s_branch .LBB23_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2635,6 +2725,7 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
@@ -2645,7 +2736,8 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB25_3:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s6
@@ -2655,14 +2747,16 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -2671,14 +2765,16 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -2687,16 +2783,17 @@ define inreg <2 x i16> @bitcast_f32_to_v2i16_scalar(float inreg %a, i32 inreg %b
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
-; GFX11-NEXT: .LBB25_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -2813,6 +2910,7 @@ define inreg float @bitcast_v2i16_to_f32_scalar(<2 x i16> inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB27_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -2830,39 +2928,45 @@ define inreg float @bitcast_v2i16_to_f32_scalar(<2 x i16> inreg %a, i32 inreg %b
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB27_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB27_2
+; SI-NEXT: s_branch .LBB27_3
;
; VI-LABEL: bitcast_v2i16_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v2i16_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -2871,16 +2975,17 @@ define inreg float @bitcast_v2i16_to_f32_scalar(<2 x i16> inreg %a, i32 inreg %b
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -2994,6 +3099,7 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB29_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s16, 16
@@ -3010,20 +3116,24 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; SI-NEXT: .LBB29_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_f32_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3032,14 +3142,16 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3048,16 +3160,17 @@ define inreg <2 x half> @bitcast_f32_to_v2f16_scalar(float inreg %a, i32 inreg %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -3181,6 +3294,7 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v2, s17
; SI-NEXT: v_cvt_f16_f32_e32 v1, s16
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB31_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2
@@ -3199,16 +3313,22 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v2f16_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_4
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -3216,8 +3336,6 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_3:
-; VI-NEXT: s_branch .LBB31_2
; VI-NEXT: .LBB31_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3226,15 +3344,17 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3243,16 +3363,17 @@ define inreg float @bitcast_v2f16_to_f32_scalar(<2 x half> inreg %a, i32 inreg %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -3364,6 +3485,7 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB33_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s16, 0xffff0000
@@ -3377,7 +3499,8 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; SI-NEXT: .LBB33_3:
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s7
; SI-NEXT: v_mov_b32_e32 v1, s6
@@ -3387,14 +3510,16 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3403,14 +3528,16 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3419,16 +3546,17 @@ define inreg <2 x bfloat> @bitcast_f32_to_v2bf16_scalar(float inreg %a, i32 inre
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -3632,6 +3760,7 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; SI-NEXT: s_cmp_lg_u32 s18, 0
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
@@ -3648,16 +3777,22 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v2bf16_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -3678,8 +3813,6 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v1, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3688,10 +3821,14 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -3714,8 +3851,6 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX9-NEXT: v_and_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3724,12 +3859,15 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
@@ -3755,8 +3893,6 @@ define inreg float @bitcast_v2bf16_to_f32_scalar(<2 x bfloat> inreg %a, i32 inre
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -3852,14 +3988,16 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_4
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_3:
-; SI-NEXT: s_branch .LBB37_2
; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
@@ -3868,14 +4006,16 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -3884,14 +4024,16 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -3900,16 +4042,17 @@ define inreg <1 x i32> @bitcast_f32_to_v1i32_scalar(float inreg %a, i32 inreg %b
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4005,62 +4148,68 @@ define inreg float @bitcast_v1i32_to_f32_scalar(<1 x i32> inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v1i32_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v1i32_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v1i32_to_f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB39_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: .LBB39_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4252,6 +4401,7 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s7, s16, 24
@@ -4268,7 +4418,8 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s8
@@ -4280,6 +4431,7 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB41_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s7, s16, 24
@@ -4296,7 +4448,8 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr6
; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s8
@@ -4308,6 +4461,7 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s7, s16, 24
@@ -4324,7 +4478,8 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s8
@@ -4336,14 +4491,13 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s3, s0, 24
-; GFX11-NEXT: s_lshr_b32 s2, s0, 16
-; GFX11-NEXT: s_lshr_b32 s4, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: s_lshr_b32 s2, s0, 24
+; GFX11-NEXT: s_lshr_b32 s1, s0, 16
+; GFX11-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB41_4
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -4352,13 +4506,14 @@ define inreg <4 x i8> @bitcast_f32_to_v4i8_scalar(float inreg %a, i32 inreg %b)
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: ; implicit-def: $sgpr1
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
-; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v2, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4633,6 +4788,7 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -4664,12 +4820,15 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v4i8_to_f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -4701,12 +4860,15 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v4i8_to_f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -4738,27 +4900,28 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-LABEL: bitcast_v4i8_to_f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -4773,14 +4936,16 @@ define inreg float @bitcast_v4i8_to_f32_scalar(<4 x i8> inreg %a, i32 inreg %b)
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s0, s1
+; GFX11-NEXT: s_or_b32 s4, s0, s1
; GFX11-NEXT: .LBB43_3: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB43_4:
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB43_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4895,6 +5060,7 @@ define inreg <2 x half> @bitcast_v2i16_to_v2f16_scalar(<2 x i16> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -4910,39 +5076,45 @@ define inreg <2 x half> @bitcast_v2i16_to_v2f16_scalar(<2 x i16> inreg %a, i32 i
; SI-NEXT: .LBB45_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v2i16_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_3
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB45_3: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v2i16_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -4951,16 +5123,17 @@ define inreg <2 x half> @bitcast_v2i16_to_v2f16_scalar(<2 x i16> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5072,10 +5245,14 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v0, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s17
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB47_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB47_3
-; SI-NEXT: .LBB47_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB47_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB47_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
@@ -5084,19 +5261,21 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v2
-; SI-NEXT: .LBB47_3: ; %end
+; SI-NEXT: .LBB47_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB47_4:
-; SI-NEXT: s_branch .LBB47_2
;
; VI-LABEL: bitcast_v2f16_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v2, s4
@@ -5104,8 +5283,6 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; VI-NEXT: v_add_f16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -5114,15 +5291,17 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -5131,16 +5310,17 @@ define inreg <2 x i16> @bitcast_v2f16_to_v2i16_scalar(<2 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5242,6 +5422,7 @@ define inreg <2 x bfloat> @bitcast_v2i16_to_v2bf16_scalar(<2 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s6, s16, 16
@@ -5259,39 +5440,45 @@ define inreg <2 x bfloat> @bitcast_v2i16_to_v2bf16_scalar(<2 x i16> inreg %a, i3
; SI-NEXT: .LBB49_4:
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v2i16_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_3
-; VI-NEXT: .LBB49_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB49_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB49_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB49_3: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_4:
-; VI-NEXT: s_branch .LBB49_2
;
; GFX9-LABEL: bitcast_v2i16_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
-; GFX9-NEXT: .LBB49_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB49_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -5300,16 +5487,17 @@ define inreg <2 x bfloat> @bitcast_v2i16_to_v2bf16_scalar(<2 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB49_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5516,6 +5704,7 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s18, 0
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s17
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB51_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v3
@@ -5533,16 +5722,22 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v2bf16_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_4
-; VI-NEXT: .LBB51_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB51_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB51_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -5563,8 +5758,6 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v1, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_3:
-; VI-NEXT: s_branch .LBB51_2
; VI-NEXT: .LBB51_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -5573,10 +5766,14 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
-; GFX9-NEXT: .LBB51_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB51_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -5598,8 +5795,6 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v2, 0xffff0000
; GFX9-NEXT: v_and_or_b32 v0, v1, v2, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -5608,12 +5803,15 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB51_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
@@ -5637,8 +5835,6 @@ define inreg <2 x i16> @bitcast_v2bf16_to_v2i16_scalar(<2 x bfloat> inreg %a, i3
; GFX11-NEXT: v_cndmask_b32_e32 v1, v3, v5, vcc_lo
; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v1, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5755,6 +5951,7 @@ define inreg <1 x i32> @bitcast_v2i16_to_v1i32_scalar(<2 x i16> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB53_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -5772,39 +5969,45 @@ define inreg <1 x i32> @bitcast_v2i16_to_v1i32_scalar(<2 x i16> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB53_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v2i16_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v2i16_to_v1i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -5813,16 +6016,17 @@ define inreg <1 x i32> @bitcast_v2i16_to_v1i32_scalar(<2 x i16> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5929,6 +6133,7 @@ define inreg <2 x i16> @bitcast_v1i32_to_v2i16_scalar(<1 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 16
@@ -5942,53 +6147,59 @@ define inreg <2 x i16> @bitcast_v1i32_to_v2i16_scalar(<1 x i32> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v1i32_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_3
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB55_3: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_4:
-; VI-NEXT: s_branch .LBB55_2
;
; GFX9-LABEL: bitcast_v1i32_to_v2i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_3
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB55_3: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_4:
-; GFX9-NEXT: s_branch .LBB55_2
;
; GFX11-LABEL: bitcast_v1i32_to_v2i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB55_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: .LBB55_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB55_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -6198,6 +6409,7 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -6227,12 +6439,15 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v2i16_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB57_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s8, s16, 24
@@ -6260,12 +6475,15 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; VI-NEXT: ; implicit-def: $sgpr9
; VI-NEXT: ; implicit-def: $sgpr6
; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: s_branch .LBB57_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB57_2
+; VI-NEXT: s_branch .LBB57_3
;
; GFX9-LABEL: bitcast_v2i16_to_v4i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s7, s16, 24
@@ -6282,7 +6500,8 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: s_branch .LBB57_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s8
@@ -6294,14 +6513,13 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s3, s0, 24
-; GFX11-NEXT: s_lshr_b32 s2, s0, 16
-; GFX11-NEXT: s_lshr_b32 s4, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: s_lshr_b32 s2, s0, 24
+; GFX11-NEXT: s_lshr_b32 s1, s0, 16
+; GFX11-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB57_4
; GFX11-NEXT: .LBB57_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -6310,13 +6528,14 @@ define inreg <4 x i8> @bitcast_v2i16_to_v4i8_scalar(<2 x i16> inreg %a, i32 inre
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB57_2
+; GFX11-NEXT: ; implicit-def: $sgpr1
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB57_2
; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
-; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v2, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6594,6 +6813,7 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB59_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -6629,12 +6849,15 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; SI-NEXT: .LBB59_4:
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB59_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB59_2
+; SI-NEXT: s_branch .LBB59_3
;
; VI-LABEL: bitcast_v4i8_to_v2i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB59_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -6666,12 +6889,15 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB59_4:
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB59_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB59_2
+; VI-NEXT: s_branch .LBB59_3
;
; GFX9-LABEL: bitcast_v4i8_to_v2i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB59_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -6703,27 +6929,28 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB59_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB59_2
+; GFX9-NEXT: s_branch .LBB59_3
;
; GFX11-LABEL: bitcast_v4i8_to_v2i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB59_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB59_3
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_cbranch_execnz .LBB59_3
; GFX11-NEXT: .LBB59_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -6738,14 +6965,16 @@ define inreg <2 x i16> @bitcast_v4i8_to_v2i16_scalar(<4 x i8> inreg %a, i32 inre
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s0, s1
+; GFX11-NEXT: s_or_b32 s4, s0, s1
; GFX11-NEXT: .LBB59_3: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB59_4:
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB59_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB59_2
+; GFX11-NEXT: s_branch .LBB59_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6867,6 +7096,7 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v2, s16
; SI-NEXT: v_cvt_f16_f32_e32 v3, s17
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB61_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2
@@ -6886,16 +7116,22 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; SI-NEXT: .LBB61_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB61_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB61_2
+; SI-NEXT: s_branch .LBB61_3
;
; VI-LABEL: bitcast_v2f16_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB61_4
-; VI-NEXT: .LBB61_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB61_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB61_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v2, s4
@@ -6903,8 +7139,6 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; VI-NEXT: v_add_f16_sdwa v0, v2, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v0, v1, v0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_3:
-; VI-NEXT: s_branch .LBB61_2
; VI-NEXT: .LBB61_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -6913,15 +7147,17 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB61_4
-; GFX9-NEXT: .LBB61_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB61_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_3:
-; GFX9-NEXT: s_branch .LBB61_2
; GFX9-NEXT: .LBB61_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -6930,16 +7166,17 @@ define inreg <2 x bfloat> @bitcast_v2f16_to_v2bf16_scalar(<2 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB61_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
-; GFX11-NEXT: .LBB61_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_3:
-; GFX11-NEXT: s_branch .LBB61_2
; GFX11-NEXT: .LBB61_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7148,6 +7385,7 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; SI-NEXT: s_cmp_lg_u32 s18, 0
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s17
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB63_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v2
@@ -7169,16 +7407,22 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; SI-NEXT: .LBB63_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB63_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB63_2
+; SI-NEXT: s_branch .LBB63_3
;
; VI-LABEL: bitcast_v2bf16_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
-; VI-NEXT: .LBB63_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB63_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB63_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -7199,8 +7443,6 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v1, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -7209,10 +7451,14 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
-; GFX9-NEXT: .LBB63_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB63_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -7235,8 +7481,6 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX9-NEXT: v_and_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -7245,12 +7489,15 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB63_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
@@ -7276,8 +7523,6 @@ define inreg <2 x half> @bitcast_v2bf16_to_v2f16_scalar(<2 x bfloat> inreg %a, i
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7401,6 +7646,7 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v2, s17
; SI-NEXT: v_cvt_f16_f32_e32 v1, s16
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB65_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2
@@ -7419,16 +7665,22 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB65_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB65_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB65_2
+; SI-NEXT: s_branch .LBB65_3
;
; VI-LABEL: bitcast_v2f16_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_4
-; VI-NEXT: .LBB65_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB65_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB65_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -7436,8 +7688,6 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v1
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_3:
-; VI-NEXT: s_branch .LBB65_2
; VI-NEXT: .LBB65_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -7446,15 +7696,17 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_4
-; GFX9-NEXT: .LBB65_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB65_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_3:
-; GFX9-NEXT: s_branch .LBB65_2
; GFX9-NEXT: .LBB65_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -7463,16 +7715,17 @@ define inreg <1 x i32> @bitcast_v2f16_to_v1i32_scalar(<2 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB65_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
-; GFX11-NEXT: .LBB65_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_3:
-; GFX11-NEXT: s_branch .LBB65_2
; GFX11-NEXT: .LBB65_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7586,6 +7839,7 @@ define inreg <2 x half> @bitcast_v1i32_to_v2f16_scalar(<1 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB67_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s16, 16
@@ -7602,53 +7856,59 @@ define inreg <2 x half> @bitcast_v1i32_to_v2f16_scalar(<1 x i32> inreg %a, i32 i
; SI-NEXT: .LBB67_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB67_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB67_2
+; SI-NEXT: s_branch .LBB67_3
;
; VI-LABEL: bitcast_v1i32_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_3
-; VI-NEXT: .LBB67_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB67_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB67_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB67_3: ; %end
+; VI-NEXT: .LBB67_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB67_4:
-; VI-NEXT: s_branch .LBB67_2
;
; GFX9-LABEL: bitcast_v1i32_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_3
-; GFX9-NEXT: .LBB67_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB67_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB67_3: ; %end
+; GFX9-NEXT: .LBB67_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB67_4:
-; GFX9-NEXT: s_branch .LBB67_2
;
; GFX11-LABEL: bitcast_v1i32_to_v2f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB67_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB67_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: .LBB67_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB67_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7856,6 +8116,7 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; SI-NEXT: v_cvt_f16_f32_e32 v2, s17
; SI-NEXT: v_cvt_f16_f32_e32 v4, s16
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB69_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v2
@@ -7880,12 +8141,15 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB69_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB69_2
+; SI-NEXT: s_branch .LBB69_3
;
; VI-LABEL: bitcast_v2f16_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB69_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s6, s16, 24
@@ -7906,7 +8170,8 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; VI-NEXT: ; implicit-def: $sgpr7
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB69_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB69_2
; VI-NEXT: .LBB69_4:
; VI-NEXT: v_mov_b32_e32 v2, s8
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -7918,6 +8183,7 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB69_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s7, s16, 24
@@ -7935,7 +8201,8 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr6
; GFX9-NEXT: ; implicit-def: $sgpr7
-; GFX9-NEXT: s_branch .LBB69_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB69_2
; GFX9-NEXT: .LBB69_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s8
@@ -7947,14 +8214,13 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB69_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s3, s0, 24
-; GFX11-NEXT: s_lshr_b32 s2, s0, 16
-; GFX11-NEXT: s_lshr_b32 s4, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB69_4
+; GFX11-NEXT: s_lshr_b32 s2, s0, 24
+; GFX11-NEXT: s_lshr_b32 s1, s0, 16
+; GFX11-NEXT: s_lshr_b32 s3, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB69_4
; GFX11-NEXT: .LBB69_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -7963,13 +8229,14 @@ define inreg <4 x i8> @bitcast_v2f16_to_v4i8_scalar(<2 x half> inreg %a, i32 inr
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB69_3:
-; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB69_2
+; GFX11-NEXT: ; implicit-def: $sgpr1
+; GFX11-NEXT: ; implicit-def: $sgpr2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB69_2
; GFX11-NEXT: .LBB69_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s4
-; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v2, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8243,6 +8510,7 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB71_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -8272,12 +8540,15 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; SI-NEXT: .LBB71_4:
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB71_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB71_2
+; SI-NEXT: s_branch .LBB71_3
;
; VI-LABEL: bitcast_v4i8_to_v2f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB71_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -8309,12 +8580,15 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB71_4:
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB71_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB71_2
+; VI-NEXT: s_branch .LBB71_3
;
; GFX9-LABEL: bitcast_v4i8_to_v2f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -8346,27 +8620,28 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB71_4:
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB71_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB71_2
+; GFX9-NEXT: s_branch .LBB71_3
;
; GFX11-LABEL: bitcast_v4i8_to_v2f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_cbranch_execnz .LBB71_3
; GFX11-NEXT: .LBB71_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -8381,14 +8656,16 @@ define inreg <2 x half> @bitcast_v4i8_to_v2f16_scalar(<4 x i8> inreg %a, i32 inr
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s0, s1
+; GFX11-NEXT: s_or_b32 s4, s0, s1
; GFX11-NEXT: .LBB71_3: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB71_4:
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB71_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB71_2
+; GFX11-NEXT: s_branch .LBB71_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8589,6 +8866,7 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: s_cmp_lg_u32 s18, 0
; SI-NEXT: v_mul_f32_e64 v1, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB73_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v1
@@ -8605,16 +8883,22 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB73_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB73_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB73_2
+; SI-NEXT: s_branch .LBB73_3
;
; VI-LABEL: bitcast_v2bf16_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
-; VI-NEXT: .LBB73_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB73_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB73_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -8635,8 +8919,6 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v1, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
@@ -8645,10 +8927,14 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
-; GFX9-NEXT: .LBB73_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB73_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s16, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -8671,8 +8957,6 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
@@ -8681,12 +8965,15 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB73_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
-; GFX11-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s1
@@ -8712,8 +8999,6 @@ define inreg <1 x i32> @bitcast_v2bf16_to_v1i32_scalar(<2 x bfloat> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 16, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v1, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -8825,6 +9110,7 @@ define inreg <2 x bfloat> @bitcast_v1i32_to_v2bf16_scalar(<1 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB75_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s16, 0xffff0000
@@ -8841,53 +9127,59 @@ define inreg <2 x bfloat> @bitcast_v1i32_to_v2bf16_scalar(<1 x i32> inreg %a, i3
; SI-NEXT: .LBB75_4:
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB75_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB75_2
+; SI-NEXT: s_branch .LBB75_3
;
; VI-LABEL: bitcast_v1i32_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB75_3
-; VI-NEXT: .LBB75_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB75_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB75_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB75_3: ; %end
+; VI-NEXT: .LBB75_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v1i32_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_3
-; GFX9-NEXT: .LBB75_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB75_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB75_3: ; %end
+; GFX9-NEXT: .LBB75_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB75_4:
-; GFX9-NEXT: s_branch .LBB75_2
;
; GFX11-LABEL: bitcast_v1i32_to_v2bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
+; GFX11-NEXT: s_mov_b32 s1, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s1, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB75_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccz .LBB75_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: v_mov_b32_e32 v0, s0
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: .LBB75_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
+; GFX11-NEXT: .LBB75_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_mov_b32_e32 v0, s0
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -9165,6 +9457,7 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; SI-NEXT: s_cmp_lg_u32 s18, 0
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB77_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v4
@@ -9188,12 +9481,15 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB77_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB77_2
+; SI-NEXT: s_branch .LBB77_3
;
; VI-LABEL: bitcast_v2bf16_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB77_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s7, s16, 24
@@ -9228,7 +9524,8 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr6
; VI-NEXT: ; implicit-def: $sgpr7
-; VI-NEXT: s_branch .LBB77_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB77_2
; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v1, s8
; VI-NEXT: v_mov_b32_e32 v3, s7
@@ -9240,6 +9537,7 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s6, s16, 24
@@ -9275,7 +9573,8 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr7
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB77_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB77_2
; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v2, s8
@@ -9287,14 +9586,13 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s1, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s2, s0, 24
-; GFX11-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
-; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX11-NEXT: s_lshr_b32 s1, s0, 24
+; GFX11-NEXT: s_lshr_b32 s3, s0, 16
+; GFX11-NEXT: s_lshr_b32 s2, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB77_4
; GFX11-NEXT: .LBB77_2: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s1, s0, 16
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff0000
@@ -9326,13 +9624,14 @@ define inreg <4 x i8> @bitcast_v2bf16_to_v4i8_scalar(<2 x bfloat> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v1
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: s_branch .LBB77_2
+; GFX11-NEXT: ; implicit-def: $sgpr3
+; GFX11-NEXT: ; implicit-def: $sgpr1
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB77_2
; GFX11-NEXT: .LBB77_4:
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s2
-; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s1
+; GFX11-NEXT: v_dual_mov_b32 v2, s3 :: v_dual_mov_b32 v1, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9606,6 +9905,7 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB79_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -9637,12 +9937,15 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; SI-NEXT: .LBB79_4:
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr7
-; SI-NEXT: s_branch .LBB79_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB79_2
+; SI-NEXT: s_branch .LBB79_3
;
; VI-LABEL: bitcast_v4i8_to_v2bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB79_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -9674,12 +9977,15 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB79_4:
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB79_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB79_2
+; VI-NEXT: s_branch .LBB79_3
;
; GFX9-LABEL: bitcast_v4i8_to_v2bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB79_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -9711,27 +10017,28 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB79_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB79_2
+; GFX9-NEXT: s_branch .LBB79_3
;
; GFX11-LABEL: bitcast_v4i8_to_v2bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB79_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB79_3
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_cbranch_execnz .LBB79_3
; GFX11-NEXT: .LBB79_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -9746,14 +10053,16 @@ define inreg <2 x bfloat> @bitcast_v4i8_to_v2bf16_scalar(<4 x i8> inreg %a, i32
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s0, s1
+; GFX11-NEXT: s_or_b32 s4, s0, s1
; GFX11-NEXT: .LBB79_3: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB79_4:
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB79_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB79_2
+; GFX11-NEXT: s_branch .LBB79_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9942,6 +10251,7 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s17, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB81_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s16, 24
@@ -9963,12 +10273,15 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB81_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB81_2
+; SI-NEXT: s_branch .LBB81_3
;
; VI-LABEL: bitcast_v1i32_to_v4i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s17, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB81_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s6, s16, 24
@@ -9990,12 +10303,15 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr7
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB81_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB81_2
+; VI-NEXT: s_branch .LBB81_3
;
; GFX9-LABEL: bitcast_v1i32_to_v4i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s17, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB81_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s6, s16, 24
@@ -10017,20 +10333,21 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr7
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB81_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB81_2
+; GFX9-NEXT: s_branch .LBB81_3
;
; GFX11-LABEL: bitcast_v1i32_to_v4i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB81_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s1, s0, 24
; GFX11-NEXT: s_lshr_b32 s2, s0, 16
; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB81_3
+; GFX11-NEXT: s_cbranch_execnz .LBB81_3
; GFX11-NEXT: .LBB81_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
@@ -10046,7 +10363,9 @@ define inreg <4 x i8> @bitcast_v1i32_to_v4i8_scalar(<1 x i32> inreg %a, i32 inre
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr1
-; GFX11-NEXT: s_branch .LBB81_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-NEXT: s_cbranch_vccz .LBB81_2
+; GFX11-NEXT: s_branch .LBB81_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10320,6 +10639,7 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB83_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -10351,12 +10671,15 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB83_4:
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB83_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB83_2
+; SI-NEXT: s_branch .LBB83_3
;
; VI-LABEL: bitcast_v4i8_to_v1i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s20, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB83_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -10388,12 +10711,15 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB83_4:
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB83_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB83_2
+; VI-NEXT: s_branch .LBB83_3
;
; GFX9-LABEL: bitcast_v4i8_to_v1i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s20, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB83_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -10425,27 +10751,28 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB83_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB83_2
+; GFX9-NEXT: s_branch .LBB83_3
;
; GFX11-LABEL: bitcast_v4i8_to_v1i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s16, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB83_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s8
-; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
-; GFX11-NEXT: s_lshl_b32 s6, s6, 16
+; GFX11-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB83_3
+; GFX11-NEXT: s_or_b32 s4, s4, s5
+; GFX11-NEXT: s_cbranch_execnz .LBB83_3
; GFX11-NEXT: .LBB83_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -10460,14 +10787,16 @@ define inreg <1 x i32> @bitcast_v4i8_to_v1i32_scalar(<4 x i8> inreg %a, i32 inre
; GFX11-NEXT: s_and_b32 s0, s0, 0xffff
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b32 s5, s0, s1
+; GFX11-NEXT: s_or_b32 s4, s0, s1
; GFX11-NEXT: .LBB83_3: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_mov_b32_e32 v0, s5
+; GFX11-NEXT: v_mov_b32_e32 v0, s4
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB83_4:
-; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB83_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB83_2
+; GFX11-NEXT: s_branch .LBB83_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll
index 6fc9a35..b9ed916 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.352bit.ll
@@ -125,10 +125,14 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: s_add_i32 s25, s25, 3
; SI-NEXT: s_add_i32 s24, s24, 3
@@ -140,7 +144,7 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -153,17 +157,19 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v11i32_to_v11f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
@@ -175,7 +181,7 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -188,17 +194,19 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v11i32_to_v11f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
@@ -210,7 +218,7 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -223,19 +231,20 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v11i32_to_v11f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
@@ -247,7 +256,7 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -256,8 +265,6 @@ define inreg <11 x float> @bitcast_v11i32_to_v11f32_scalar(<11 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_mov_b32_e32 v10, s22
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -388,10 +395,14 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
; SI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -404,8 +415,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -424,10 +433,14 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -440,8 +453,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -460,10 +471,14 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -476,8 +491,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -500,12 +513,15 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -518,8 +534,6 @@ define inreg <11 x i32> @bitcast_v11f32_to_v11i32_scalar(<11 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -714,6 +728,7 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s26
@@ -793,16 +808,22 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_v11i32_to_v22i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
@@ -814,7 +835,7 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -827,17 +848,19 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v11i32_to_v22i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
@@ -849,7 +872,7 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -862,19 +885,20 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v11i32_to_v22i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
@@ -886,7 +910,7 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -895,8 +919,6 @@ define inreg <22 x i16> @bitcast_v11i32_to_v22i16_scalar(<11 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_mov_b32_e32 v10, s22
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1159,11 +1181,12 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v11, v6
; SI-NEXT: v_mov_b32_e32 v12, v4
; SI-NEXT: v_mov_b32_e32 v13, v2
; SI-NEXT: v_mov_b32_e32 v14, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5
@@ -1270,16 +1293,22 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v22i16_to_v11i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s26, 3
; VI-NEXT: s_and_b32 s4, s26, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -1335,7 +1364,7 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1348,17 +1377,19 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v22i16_to_v11i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -1371,8 +1402,6 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1395,12 +1424,15 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -1413,8 +1445,6 @@ define inreg <11 x i32> @bitcast_v22i16_to_v11i32_scalar(<22 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -1676,6 +1706,7 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s26, 16
@@ -1782,16 +1813,22 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr19
; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_v11i32_to_v22f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
; VI-NEXT: s_add_i32 s24, s24, 3
@@ -1803,7 +1840,7 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1816,17 +1853,19 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v11i32_to_v22f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
; GFX9-NEXT: s_add_i32 s24, s24, 3
@@ -1838,7 +1877,7 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1851,19 +1890,20 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v11i32_to_v22f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
; GFX11-NEXT: s_add_i32 s20, s20, 3
@@ -1875,7 +1915,7 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1884,8 +1924,6 @@ define inreg <22 x half> @bitcast_v11i32_to_v22f16_scalar(<11 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_mov_b32_e32 v10, s22
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2206,7 +2244,7 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v25, s22
; SI-NEXT: v_cvt_f16_f32_e32 v24, s25
; SI-NEXT: v_cvt_f16_f32_e32 v23, s24
-; SI-NEXT: v_cvt_f16_f32_e32 v22, s27
+; SI-NEXT: v_cvt_f16_f32_e32 v21, s27
; SI-NEXT: v_cvt_f16_f32_e32 v20, s26
; SI-NEXT: v_cvt_f16_f32_e32 v18, v1
; SI-NEXT: v_cvt_f16_f32_e32 v17, v0
@@ -2216,10 +2254,11 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v13, v4
; SI-NEXT: v_cvt_f16_f32_e32 v12, v7
; SI-NEXT: v_cvt_f16_f32_e32 v11, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v21, s29
+; SI-NEXT: v_cvt_f16_f32_e32 v22, s29
; SI-NEXT: v_cvt_f16_f32_e32 v19, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v32
@@ -2227,8 +2266,8 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v26
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v24
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v22
-; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v21
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v22
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v18
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v14
@@ -2283,7 +2322,7 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v6, v23
; SI-NEXT: v_or_b32_e32 v3, v4, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v21
; SI-NEXT: v_cvt_f32_f16_e32 v7, v20
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
@@ -2292,7 +2331,7 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_or_b32_e32 v4, v6, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v21
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v22
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v7, v5
; SI-NEXT: v_cvt_f32_f16_e32 v7, v19
@@ -2338,16 +2377,22 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v22f16_to_v11i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s26, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2405,8 +2450,6 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v11
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2425,10 +2468,14 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -2442,8 +2489,6 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2466,12 +2511,15 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -2484,8 +2532,6 @@ define inreg <11 x i32> @bitcast_v22f16_to_v11i32_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -2674,6 +2720,7 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s26
@@ -2730,7 +2777,8 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
; SI-NEXT: .LBB13_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -2754,10 +2802,14 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_4
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -2770,8 +2822,6 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_3:
-; VI-NEXT: s_branch .LBB13_2
; VI-NEXT: .LBB13_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2795,10 +2845,14 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_4
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -2811,8 +2865,6 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_3:
-; GFX9-NEXT: s_branch .LBB13_2
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2840,12 +2892,15 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -2858,8 +2913,6 @@ define inreg <22 x i16> @bitcast_v11f32_to_v22i16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_3:
-; GFX11-NEXT: s_branch .LBB13_2
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3132,11 +3185,12 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v11, v6
; SI-NEXT: v_mov_b32_e32 v12, v4
; SI-NEXT: v_mov_b32_e32 v13, v2
; SI-NEXT: v_mov_b32_e32 v14, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5
@@ -3243,16 +3297,22 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v22i16_to_v11f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s26, 3
; VI-NEXT: s_and_b32 s4, s26, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -3308,7 +3368,7 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3321,17 +3381,19 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v22i16_to_v11f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -3344,8 +3406,6 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3368,12 +3428,15 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -3386,8 +3449,6 @@ define inreg <11 x float> @bitcast_v22i16_to_v11f32_scalar(<22 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3643,6 +3704,7 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s27, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s26, 16
@@ -3749,16 +3811,22 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr19
; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v11f32_to_v22f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_4
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
; VI-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -3771,8 +3839,6 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_3:
-; VI-NEXT: s_branch .LBB17_2
; VI-NEXT: .LBB17_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3796,10 +3862,14 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_4
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
; GFX9-NEXT: v_add_f32_e64 v8, s24, 1.0
@@ -3812,8 +3882,6 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_3:
-; GFX9-NEXT: s_branch .LBB17_2
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3841,12 +3909,15 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s20, 1.0
@@ -3859,8 +3930,6 @@ define inreg <22 x half> @bitcast_v11f32_to_v22f16_scalar(<11 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_3:
-; GFX11-NEXT: s_branch .LBB17_2
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4191,7 +4260,7 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v25, s22
; SI-NEXT: v_cvt_f16_f32_e32 v24, s25
; SI-NEXT: v_cvt_f16_f32_e32 v23, s24
-; SI-NEXT: v_cvt_f16_f32_e32 v22, s27
+; SI-NEXT: v_cvt_f16_f32_e32 v21, s27
; SI-NEXT: v_cvt_f16_f32_e32 v20, s26
; SI-NEXT: v_cvt_f16_f32_e32 v18, v1
; SI-NEXT: v_cvt_f16_f32_e32 v17, v0
@@ -4201,10 +4270,11 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v13, v4
; SI-NEXT: v_cvt_f16_f32_e32 v12, v7
; SI-NEXT: v_cvt_f16_f32_e32 v11, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v21, s29
+; SI-NEXT: v_cvt_f16_f32_e32 v22, s29
; SI-NEXT: v_cvt_f16_f32_e32 v19, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v32
@@ -4212,8 +4282,8 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v28
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v26
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v24
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v22
-; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v21
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v21
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v22
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v18
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v14
@@ -4268,7 +4338,7 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v6, v23
; SI-NEXT: v_or_b32_e32 v3, v4, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v21
; SI-NEXT: v_cvt_f32_f16_e32 v7, v20
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
@@ -4277,7 +4347,7 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_or_b32_e32 v4, v6, v4
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v21
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v22
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v7, v5
; SI-NEXT: v_cvt_f32_f16_e32 v7, v19
@@ -4323,16 +4393,22 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v22f16_to_v11f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s26, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -4390,8 +4466,6 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v11
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4410,10 +4484,14 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -4427,8 +4505,6 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4451,12 +4527,15 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -4469,8 +4548,6 @@ define inreg <11 x float> @bitcast_v22f16_to_v11f32_scalar(<22 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4766,6 +4843,7 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, v7
; SI-NEXT: v_mov_b32_e32 v25, v6
; SI-NEXT: v_mov_b32_e32 v24, v5
@@ -4774,7 +4852,7 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v29, v2
; SI-NEXT: v_mov_b32_e32 v28, v1
; SI-NEXT: v_mov_b32_e32 v27, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -4870,16 +4948,22 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr19
; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr21
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v22i16_to_v22f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
@@ -4935,7 +5019,7 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4948,17 +5032,19 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v22i16_to_v22f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v8, s24, 3 op_sel_hi:[1,0]
@@ -4971,8 +5057,6 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5000,12 +5084,15 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s20, 3 op_sel_hi:[1,0]
@@ -5018,8 +5105,6 @@ define inreg <22 x half> @bitcast_v22i16_to_v22f16_scalar(<22 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -5325,10 +5410,14 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v12, s28
; SI-NEXT: v_cvt_f16_f32_e32 v13, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
@@ -5422,19 +5511,21 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v9, v10, v9, 16
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v22f16_to_v22i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s27, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s25, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v2, s5
@@ -5492,8 +5583,6 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v0, v1
; VI-NEXT: v_or_b32_e32 v0, v11, v12
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5517,10 +5606,14 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s27, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v9, s25, v0 op_sel_hi:[1,0]
@@ -5534,8 +5627,6 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5563,12 +5654,15 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s23, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s20 op_sel_hi:[0,1]
@@ -5581,8 +5675,6 @@ define inreg <22 x i16> @bitcast_v22f16_to_v22i16_scalar(<22 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll
index c9860dbb..2a9f65f 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.384bit.ll
@@ -129,10 +129,14 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s27, s27, 3
; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: s_add_i32 s25, s25, 3
@@ -145,7 +149,7 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -159,17 +163,19 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v12i32_to_v12f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s27, s27, 3
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
@@ -182,7 +188,7 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -196,17 +202,19 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v12i32_to_v12f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s27, s27, 3
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
@@ -219,7 +227,7 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -233,19 +241,20 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v12i32_to_v12f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s23, s23, 3
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
@@ -258,7 +267,7 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -267,8 +276,6 @@ define inreg <12 x float> @bitcast_v12i32_to_v12f32_scalar(<12 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -402,10 +409,14 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -419,8 +430,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -440,10 +449,14 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -457,8 +470,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -478,10 +489,14 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -495,8 +510,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -520,12 +533,15 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -539,8 +555,6 @@ define inreg <12 x i32> @bitcast_v12f32_to_v12i32_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -689,10 +703,14 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s27, s27, 3
; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: s_add_i32 s25, s25, 3
@@ -705,7 +723,7 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -719,17 +737,19 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v12i32_to_v6f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s27, s27, 3
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
@@ -742,7 +762,7 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -756,17 +776,19 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v12i32_to_v6f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s27, s27, 3
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
@@ -779,7 +801,7 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -793,19 +815,20 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v12i32_to_v6f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s23, s23, 3
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
@@ -818,7 +841,7 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -827,8 +850,6 @@ define inreg <6 x double> @bitcast_v12i32_to_v6f64_scalar(<12 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -945,10 +966,14 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_4
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -956,8 +981,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_3:
-; SI-NEXT: s_branch .LBB7_2
; SI-NEXT: .LBB7_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -977,10 +1000,14 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_4
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -988,8 +1015,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_3:
-; VI-NEXT: s_branch .LBB7_2
; VI-NEXT: .LBB7_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1009,10 +1034,14 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -1020,8 +1049,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1045,12 +1072,15 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -1058,8 +1088,6 @@ define inreg <12 x i32> @bitcast_v6f64_to_v12i32_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -1208,10 +1236,14 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s27, s27, 3
; SI-NEXT: s_add_i32 s26, s26, 3
; SI-NEXT: s_add_i32 s25, s25, 3
@@ -1224,7 +1256,7 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1238,17 +1270,19 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v12i32_to_v6i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s27, s27, 3
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
@@ -1261,7 +1295,7 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1275,17 +1309,19 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v12i32_to_v6i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s27, s27, 3
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
@@ -1298,7 +1334,7 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1312,19 +1348,20 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v12i32_to_v6i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s23, s23, 3
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
@@ -1337,7 +1374,7 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1346,8 +1383,6 @@ define inreg <6 x i64> @bitcast_v12i32_to_v6i64_scalar(<12 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1491,10 +1526,14 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s26, s26, 3
; SI-NEXT: s_addc_u32 s27, s27, 0
; SI-NEXT: s_add_u32 s24, s24, 3
@@ -1507,7 +1546,7 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1521,17 +1560,19 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v6i64_to_v12i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s26, s26, 3
; VI-NEXT: s_addc_u32 s27, s27, 0
; VI-NEXT: s_add_u32 s24, s24, 3
@@ -1544,7 +1585,7 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1558,17 +1599,19 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v6i64_to_v12i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s26, s26, 3
; GFX9-NEXT: s_addc_u32 s27, s27, 0
; GFX9-NEXT: s_add_u32 s24, s24, 3
@@ -1581,7 +1624,7 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1595,19 +1638,20 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v6i64_to_v12i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB11_3
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s22, s22, 3
; GFX11-NEXT: s_addc_u32 s23, s23, 0
; GFX11-NEXT: s_add_u32 s20, s20, 3
@@ -1620,7 +1664,7 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB11_3: ; %end
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1629,8 +1673,6 @@ define inreg <12 x i32> @bitcast_v6i64_to_v12i32_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_4:
-; GFX11-NEXT: s_branch .LBB11_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1822,6 +1864,7 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s26
@@ -1907,16 +1950,22 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v12i32_to_v24i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s27, s27, 3
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
@@ -1929,7 +1978,7 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1943,17 +1992,19 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v12i32_to_v24i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s27, s27, 3
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
@@ -1966,7 +2017,7 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1980,19 +2031,20 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v12i32_to_v24i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s23, s23, 3
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
@@ -2005,7 +2057,7 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2014,8 +2066,6 @@ define inreg <24 x i16> @bitcast_v12i32_to_v24i16_scalar(<12 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2292,12 +2342,13 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, v8
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_mov_b32_e32 v14, v4
; SI-NEXT: v_mov_b32_e32 v15, v2
; SI-NEXT: v_mov_b32_e32 v16, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v5
@@ -2411,16 +2462,22 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v24i16_to_v12i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s27, 3
; VI-NEXT: s_and_b32 s4, s27, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -2481,7 +2538,7 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2495,17 +2552,19 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v24i16_to_v12i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -2519,8 +2578,6 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2544,12 +2601,15 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -2563,8 +2623,6 @@ define inreg <12 x i32> @bitcast_v24i16_to_v12i32_scalar(<24 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -2840,6 +2898,7 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s27, 16
@@ -2955,16 +3014,22 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v12i32_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s27, s27, 3
; VI-NEXT: s_add_i32 s26, s26, 3
; VI-NEXT: s_add_i32 s25, s25, 3
@@ -2977,7 +3042,7 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2991,17 +3056,19 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v12i32_to_v24f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s27, s27, 3
; GFX9-NEXT: s_add_i32 s26, s26, 3
; GFX9-NEXT: s_add_i32 s25, s25, 3
@@ -3014,7 +3081,7 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3028,19 +3095,20 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v12i32_to_v24f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s23, s23, 3
; GFX11-NEXT: s_add_i32 s22, s22, 3
; GFX11-NEXT: s_add_i32 s21, s21, 3
@@ -3053,7 +3121,7 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3062,8 +3130,6 @@ define inreg <24 x half> @bitcast_v12i32_to_v24f16_scalar(<12 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3419,6 +3485,7 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v22, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -3547,16 +3614,22 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v24f16_to_v12i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -3619,8 +3692,6 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3640,10 +3711,14 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -3658,8 +3733,6 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3683,12 +3756,15 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -3702,8 +3778,6 @@ define inreg <12 x i32> @bitcast_v24f16_to_v12i32_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3845,10 +3919,14 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -3862,8 +3940,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -3887,10 +3963,14 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -3904,8 +3984,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3929,10 +4007,14 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -3946,8 +4028,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3975,12 +4055,15 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -3994,8 +4077,6 @@ define inreg <6 x double> @bitcast_v12f32_to_v6f64_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4122,10 +4203,14 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB23_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_4
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -4133,8 +4218,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_3:
-; SI-NEXT: s_branch .LBB23_2
; SI-NEXT: .LBB23_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -4154,10 +4237,14 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -4165,8 +4252,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4186,10 +4271,14 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -4197,8 +4286,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4222,12 +4309,15 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -4235,8 +4325,6 @@ define inreg <12 x float> @bitcast_v6f64_to_v12f32_scalar(<6 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4378,10 +4466,14 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
; SI-NEXT: v_add_f32_e64 v10, s26, 1.0
; SI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -4395,8 +4487,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -4420,10 +4510,14 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -4437,8 +4531,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4462,10 +4554,14 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -4479,8 +4575,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4508,12 +4602,15 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
-; GFX11-NEXT: .LBB25_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -4527,8 +4624,6 @@ define inreg <6 x i64> @bitcast_v12f32_to_v6i64_scalar(<12 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4682,10 +4777,14 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s26, s26, 3
; SI-NEXT: s_addc_u32 s27, s27, 0
; SI-NEXT: s_add_u32 s24, s24, 3
@@ -4698,7 +4797,7 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4712,17 +4811,19 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v6i64_to_v12f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s26, s26, 3
; VI-NEXT: s_addc_u32 s27, s27, 0
; VI-NEXT: s_add_u32 s24, s24, 3
@@ -4735,7 +4836,7 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4749,17 +4850,19 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v6i64_to_v12f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s26, s26, 3
; GFX9-NEXT: s_addc_u32 s27, s27, 0
; GFX9-NEXT: s_add_u32 s24, s24, 3
@@ -4772,7 +4875,7 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4786,19 +4889,20 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v6i64_to_v12f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s22, s22, 3
; GFX11-NEXT: s_addc_u32 s23, s23, 0
; GFX11-NEXT: s_add_u32 s20, s20, 3
@@ -4811,7 +4915,7 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB27_3: ; %end
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -4820,8 +4924,6 @@ define inreg <12 x float> @bitcast_v6i64_to_v12f32_scalar(<6 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_4:
-; GFX11-NEXT: s_branch .LBB27_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5006,6 +5108,7 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB29_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s26
@@ -5066,7 +5169,8 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -5092,10 +5196,14 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -5109,8 +5217,6 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5134,10 +5240,14 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -5151,8 +5261,6 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5180,12 +5288,15 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -5199,8 +5310,6 @@ define inreg <24 x i16> @bitcast_v12f32_to_v24i16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -5487,12 +5596,13 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, v8
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_mov_b32_e32 v14, v4
; SI-NEXT: v_mov_b32_e32 v15, v2
; SI-NEXT: v_mov_b32_e32 v16, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v5
@@ -5606,16 +5716,22 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v24i16_to_v12f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s27, 3
; VI-NEXT: s_and_b32 s4, s27, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -5676,7 +5792,7 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -5690,17 +5806,19 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v24i16_to_v12f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -5714,8 +5832,6 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5739,12 +5855,15 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -5758,8 +5877,6 @@ define inreg <12 x float> @bitcast_v24i16_to_v12f32_scalar(<24 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -6028,6 +6145,7 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s27, 16
@@ -6143,16 +6261,22 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v12f32_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
; VI-NEXT: v_add_f32_e64 v10, s26, 1.0
; VI-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -6166,8 +6290,6 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6191,10 +6313,14 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
; GFX9-NEXT: v_add_f32_e64 v10, s26, 1.0
; GFX9-NEXT: v_add_f32_e64 v9, s25, 1.0
@@ -6208,8 +6334,6 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6237,12 +6361,15 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
; GFX11-NEXT: v_add_f32_e64 v10, s22, 1.0
; GFX11-NEXT: v_add_f32_e64 v9, s21, 1.0
@@ -6256,8 +6383,6 @@ define inreg <24 x half> @bitcast_v12f32_to_v24f16_scalar(<12 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -6623,6 +6748,7 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v22, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -6751,16 +6877,22 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v24f16_to_v12f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -6823,8 +6955,6 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6844,10 +6974,14 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -6862,8 +6996,6 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6887,12 +7019,15 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -6906,8 +7041,6 @@ define inreg <12 x float> @bitcast_v24f16_to_v12f32_scalar(<24 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -7032,10 +7165,14 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB37_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_4
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -7043,8 +7180,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_3:
-; SI-NEXT: s_branch .LBB37_2
; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -7068,10 +7203,14 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -7079,8 +7218,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7104,10 +7241,14 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -7115,8 +7256,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7144,12 +7283,15 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
@@ -7157,8 +7299,6 @@ define inreg <6 x i64> @bitcast_v6f64_to_v6i64_scalar(<6 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -7312,10 +7452,14 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
; SI-NEXT: s_add_u32 s18, s18, 3
@@ -7328,7 +7472,7 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s25, s25, 0
; SI-NEXT: s_add_u32 s26, s26, 3
; SI-NEXT: s_addc_u32 s27, s27, 0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7342,17 +7486,19 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v6i64_to_v6f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
; VI-NEXT: s_add_u32 s18, s18, 3
@@ -7365,7 +7511,7 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s25, s25, 0
; VI-NEXT: s_add_u32 s26, s26, 3
; VI-NEXT: s_addc_u32 s27, s27, 0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7379,17 +7525,19 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v6i64_to_v6f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
@@ -7402,7 +7550,7 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s25, s25, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
; GFX9-NEXT: s_addc_u32 s27, s27, 0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7416,19 +7564,20 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v6i64_to_v6f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_3
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
@@ -7441,7 +7590,7 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s21, s21, 0
; GFX11-NEXT: s_add_u32 s22, s22, 3
; GFX11-NEXT: s_addc_u32 s23, s23, 0
-; GFX11-NEXT: .LBB39_3: ; %end
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7449,8 +7598,6 @@ define inreg <6 x double> @bitcast_v6i64_to_v6f64_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_4:
-; GFX11-NEXT: s_branch .LBB39_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7628,6 +7775,7 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s26
@@ -7682,7 +7830,8 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $vgpr24
; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
; SI-NEXT: .LBB41_4:
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v5, s19
@@ -7721,10 +7870,14 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -7732,8 +7885,6 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7757,10 +7908,14 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -7768,8 +7923,6 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7797,12 +7950,15 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -7810,8 +7966,6 @@ define inreg <24 x i16> @bitcast_v6f64_to_v24i16_scalar(<6 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -8100,12 +8254,13 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v8
; SI-NEXT: v_mov_b32_e32 v17, v6
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v19, v2
; SI-NEXT: v_mov_b32_e32 v20, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v5
@@ -8156,31 +8311,27 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v25, v0
-; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v19
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: v_or_b32_e32 v0, v24, v0
+; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: s_add_i32 s18, s18, 3
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
; SI-NEXT: s_add_i32 s20, s20, 3
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v0, v23, v0
; SI-NEXT: s_or_b32 s5, s6, s5
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
; SI-NEXT: s_add_i32 s22, s22, 3
-; SI-NEXT: v_or_b32_e32 v0, v23, v0
+; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: s_or_b32 s6, s7, s6
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
; SI-NEXT: s_add_i32 s24, s24, 3
-; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v17
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_and_b32 s8, s24, 0xffff
@@ -8192,13 +8343,16 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v22, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v19
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v16
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v24, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -8207,6 +8361,7 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v21, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -8219,16 +8374,22 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v24i16_to_v6f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_3
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s27, 3
; VI-NEXT: s_and_b32 s4, s27, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -8289,7 +8450,7 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB43_3: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8303,17 +8464,19 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v24i16_to_v6f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -8327,8 +8490,6 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8356,12 +8517,15 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -8375,8 +8539,6 @@ define inreg <6 x double> @bitcast_v24i16_to_v6f64_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -8620,6 +8782,7 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s27, 16
@@ -8729,16 +8892,22 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v6f64_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; VI-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -8746,8 +8915,6 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8771,10 +8938,14 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[6:7], s[22:23], 1.0
@@ -8782,8 +8953,6 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8811,12 +8980,15 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], s[18:19], 1.0
@@ -8824,8 +8996,6 @@ define inreg <24 x half> @bitcast_v6f64_to_v24f16_scalar(<6 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -9191,6 +9361,7 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v26, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v39
@@ -9319,16 +9490,22 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v24f16_to_v6f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -9391,8 +9568,6 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -9416,10 +9591,14 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -9434,8 +9613,6 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9463,12 +9640,15 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -9482,8 +9662,6 @@ define inreg <6 x double> @bitcast_v24f16_to_v6f64_scalar(<24 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -9688,6 +9866,7 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s26
@@ -9773,16 +9952,22 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v6i64_to_v24i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_3
-; VI-NEXT: .LBB49_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB49_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB49_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s26, s26, 3
; VI-NEXT: s_addc_u32 s27, s27, 0
; VI-NEXT: s_add_u32 s24, s24, 3
@@ -9795,7 +9980,7 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB49_3: ; %end
+; VI-NEXT: .LBB49_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -9809,17 +9994,19 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_4:
-; VI-NEXT: s_branch .LBB49_2
;
; GFX9-LABEL: bitcast_v6i64_to_v24i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_3
-; GFX9-NEXT: .LBB49_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB49_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s26, s26, 3
; GFX9-NEXT: s_addc_u32 s27, s27, 0
; GFX9-NEXT: s_add_u32 s24, s24, 3
@@ -9832,7 +10019,7 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB49_3: ; %end
+; GFX9-NEXT: .LBB49_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -9846,19 +10033,20 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB49_4:
-; GFX9-NEXT: s_branch .LBB49_2
;
; GFX11-LABEL: bitcast_v6i64_to_v24i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB49_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s22, s22, 3
; GFX11-NEXT: s_addc_u32 s23, s23, 0
; GFX11-NEXT: s_add_u32 s20, s20, 3
@@ -9871,7 +10059,7 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB49_3: ; %end
+; GFX11-NEXT: .LBB49_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -9880,8 +10068,6 @@ define inreg <24 x i16> @bitcast_v6i64_to_v24i16_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_4:
-; GFX11-NEXT: s_branch .LBB49_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10160,12 +10346,13 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v8
; SI-NEXT: v_mov_b32_e32 v17, v6
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v19, v2
; SI-NEXT: v_mov_b32_e32 v20, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v5
@@ -10216,31 +10403,27 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v25, v0
-; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v19
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: v_or_b32_e32 v0, v24, v0
+; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18
; SI-NEXT: s_and_b32 s4, s16, 0xffff
; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: s_add_i32 s18, s18, 3
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v18
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
; SI-NEXT: s_add_i32 s20, s20, 3
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v0, v23, v0
; SI-NEXT: s_or_b32 s5, s6, s5
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
; SI-NEXT: s_add_i32 s22, s22, 3
-; SI-NEXT: v_or_b32_e32 v0, v23, v0
+; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: s_or_b32 s6, s7, s6
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
; SI-NEXT: s_add_i32 s24, s24, 3
-; SI-NEXT: v_add_i32_e32 v9, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v17
; SI-NEXT: s_or_b32 s7, s8, s7
; SI-NEXT: s_and_b32 s8, s24, 0xffff
@@ -10252,13 +10435,16 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v22, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v19
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v16
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v24, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -10267,6 +10453,7 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v21, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -10279,16 +10466,22 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v24i16_to_v6i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_3
-; VI-NEXT: .LBB51_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB51_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB51_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s27, 3
; VI-NEXT: s_and_b32 s4, s27, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -10349,7 +10542,7 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB51_3: ; %end
+; VI-NEXT: .LBB51_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -10363,17 +10556,19 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v24i16_to_v6i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
-; GFX9-NEXT: .LBB51_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB51_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -10387,8 +10582,6 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10416,12 +10609,15 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB51_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -10435,8 +10631,6 @@ define inreg <6 x i64> @bitcast_v24i16_to_v6i64_scalar(<24 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -10717,6 +10911,7 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB53_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s27, 16
@@ -10832,16 +11027,22 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v6i64_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s26, s26, 3
; VI-NEXT: s_addc_u32 s27, s27, 0
; VI-NEXT: s_add_u32 s24, s24, 3
@@ -10854,7 +11055,7 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -10868,17 +11069,19 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v6i64_to_v24f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_3
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s26, s26, 3
; GFX9-NEXT: s_addc_u32 s27, s27, 0
; GFX9-NEXT: s_add_u32 s24, s24, 3
@@ -10891,7 +11094,7 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB53_3: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -10905,19 +11108,20 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v6i64_to_v24f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s22, s22, 3
; GFX11-NEXT: s_addc_u32 s23, s23, 0
; GFX11-NEXT: s_add_u32 s20, s20, 3
@@ -10930,7 +11134,7 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB53_3: ; %end
+; GFX11-NEXT: .LBB53_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -10939,8 +11143,6 @@ define inreg <24 x half> @bitcast_v6i64_to_v24f16_scalar(<6 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11296,6 +11498,7 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v26, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v39
@@ -11424,16 +11627,22 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v24f16_to_v6i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s27, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -11496,8 +11705,6 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v12
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -11521,10 +11728,14 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -11539,8 +11750,6 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -11568,12 +11777,15 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -11587,8 +11799,6 @@ define inreg <6 x i64> @bitcast_v24f16_to_v6i64_scalar(<24 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -11903,6 +12113,7 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v30, v9
; SI-NEXT: v_mov_b32_e32 v29, v8
; SI-NEXT: v_mov_b32_e32 v28, v7
@@ -11913,7 +12124,7 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v33, v2
; SI-NEXT: v_mov_b32_e32 v32, v1
; SI-NEXT: v_mov_b32_e32 v31, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -12017,16 +12228,22 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v24i16_to_v24f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
@@ -12087,7 +12304,7 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -12101,17 +12318,19 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v24i16_to_v24f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v10, s26, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
@@ -12125,8 +12344,6 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12154,12 +12371,15 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v10, s22, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v9, s21, 3 op_sel_hi:[1,0]
@@ -12173,8 +12393,6 @@ define inreg <24 x half> @bitcast_v24i16_to_v24f16_scalar(<24 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -12500,10 +12718,14 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v12, s28
; SI-NEXT: v_cvt_f16_f32_e32 v13, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
@@ -12606,19 +12828,21 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v13, v14, v13, 16
; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16
; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v24f16_to_v24i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s26, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v2, s5
@@ -12681,8 +12905,6 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v0, v1
; VI-NEXT: v_or_b32_e32 v0, v12, v13
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -12706,10 +12928,14 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v11, s27, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v10, s26, v0 op_sel_hi:[1,0]
@@ -12724,8 +12950,6 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12753,12 +12977,15 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v10, 0x200, s22 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s21 op_sel_hi:[0,1]
@@ -12772,8 +12999,6 @@ define inreg <24 x i16> @bitcast_v24f16_to_v24i16_scalar(<24 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll
index eaf314d..75636b1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.448bit.ll
@@ -138,10 +138,14 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s29, s29, 3
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_add_i32 s27, s27, 3
@@ -156,7 +160,7 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -172,18 +176,20 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v14i32_to_v14f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_add_i32 s27, s27, 3
@@ -198,7 +204,7 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -214,18 +220,20 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v14i32_to_v14f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -240,7 +248,7 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -256,19 +264,20 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v14i32_to_v14f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s25, s25, 3
; GFX11-NEXT: s_add_i32 s24, s24, 3
; GFX11-NEXT: s_add_i32 s23, s23, 3
@@ -283,7 +292,7 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -293,8 +302,6 @@ define inreg <14 x float> @bitcast_v14i32_to_v14f32_scalar(<14 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -437,10 +444,14 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -456,8 +467,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -482,10 +491,14 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -501,8 +514,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -527,10 +538,14 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -546,8 +561,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -575,12 +588,15 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
@@ -596,8 +612,6 @@ define inreg <14 x i32> @bitcast_v14f32_to_v14i32_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -757,10 +771,14 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s29, s29, 3
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_add_i32 s27, s27, 3
@@ -775,7 +793,7 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -791,18 +809,20 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v14i32_to_v7i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_add_i32 s27, s27, 3
@@ -817,7 +837,7 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -833,18 +853,20 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v14i32_to_v7i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -859,7 +881,7 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -875,19 +897,20 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v14i32_to_v7i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s25, s25, 3
; GFX11-NEXT: s_add_i32 s24, s24, 3
; GFX11-NEXT: s_add_i32 s23, s23, 3
@@ -902,7 +925,7 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -912,8 +935,6 @@ define inreg <7 x i64> @bitcast_v14i32_to_v7i64_scalar(<14 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1067,10 +1088,14 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s28, s28, 3
; SI-NEXT: s_addc_u32 s29, s29, 0
; SI-NEXT: s_add_u32 s26, s26, 3
@@ -1085,7 +1110,7 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1101,18 +1126,20 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v7i64_to_v14i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s28, s28, 3
; VI-NEXT: s_addc_u32 s29, s29, 0
; VI-NEXT: s_add_u32 s26, s26, 3
@@ -1127,7 +1154,7 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1143,18 +1170,20 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v7i64_to_v14i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -1169,7 +1198,7 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1185,19 +1214,20 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v7i64_to_v14i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s24, s24, 3
; GFX11-NEXT: s_addc_u32 s25, s25, 0
; GFX11-NEXT: s_add_u32 s22, s22, 3
@@ -1212,7 +1242,7 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB7_3: ; %end
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1222,8 +1252,6 @@ define inreg <14 x i32> @bitcast_v7i64_to_v14i32_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1373,10 +1401,14 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s29, s29, 3
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_add_i32 s27, s27, 3
@@ -1391,7 +1423,7 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -1407,18 +1439,20 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v14i32_to_v7f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_add_i32 s27, s27, 3
@@ -1433,7 +1467,7 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -1449,18 +1483,20 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v14i32_to_v7f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -1475,7 +1511,7 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -1491,19 +1527,20 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v14i32_to_v7f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s25, s25, 3
; GFX11-NEXT: s_add_i32 s24, s24, 3
; GFX11-NEXT: s_add_i32 s23, s23, 3
@@ -1518,7 +1555,7 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1528,8 +1565,6 @@ define inreg <7 x double> @bitcast_v14i32_to_v7f64_scalar(<14 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1651,10 +1686,14 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -1663,8 +1702,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -1689,10 +1726,14 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -1701,8 +1742,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1727,10 +1766,14 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -1739,8 +1782,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1768,12 +1809,15 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -1782,8 +1826,6 @@ define inreg <14 x i32> @bitcast_v7f64_to_v14i32_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -2002,6 +2044,7 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s28
@@ -2100,17 +2143,23 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v14i32_to_v28i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_add_i32 s27, s27, 3
@@ -2125,7 +2174,7 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2141,18 +2190,20 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v14i32_to_v28i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -2167,7 +2218,7 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -2183,19 +2234,20 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v14i32_to_v28i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s25, s25, 3
; GFX11-NEXT: s_add_i32 s24, s24, 3
; GFX11-NEXT: s_add_i32 s23, s23, 3
@@ -2210,7 +2262,7 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2220,8 +2272,6 @@ define inreg <28 x i16> @bitcast_v14i32_to_v28i16_scalar(<14 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2528,6 +2578,7 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v12
; SI-NEXT: v_mov_b32_e32 v17, v10
; SI-NEXT: v_mov_b32_e32 v18, v8
@@ -2535,7 +2586,7 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v20, v4
; SI-NEXT: v_mov_b32_e32 v21, v2
; SI-NEXT: v_mov_b32_e32 v22, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v5
@@ -2663,17 +2714,23 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v28i16_to_v14i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s29, 3
; VI-NEXT: s_and_b32 s4, s29, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -2744,7 +2801,7 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2760,18 +2817,20 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v28i16_to_v14i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
@@ -2787,8 +2846,6 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2816,12 +2873,15 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
@@ -2837,8 +2897,6 @@ define inreg <14 x i32> @bitcast_v28i16_to_v14i32_scalar(<28 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3145,6 +3203,7 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s29, 16
@@ -3278,17 +3337,23 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v14i32_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_add_i32 s27, s27, 3
@@ -3303,7 +3368,7 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -3319,18 +3384,20 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v14i32_to_v28f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -3345,7 +3412,7 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -3361,19 +3428,20 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v14i32_to_v28f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s25, s25, 3
; GFX11-NEXT: s_add_i32 s24, s24, 3
; GFX11-NEXT: s_add_i32 s23, s23, 3
@@ -3388,7 +3456,7 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3398,8 +3466,6 @@ define inreg <28 x half> @bitcast_v14i32_to_v28f16_scalar(<14 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3776,7 +3842,7 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v49, s19
; SI-NEXT: v_cvt_f16_f32_e32 v38, s18
; SI-NEXT: v_cvt_f16_f32_e32 v36, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s20
; SI-NEXT: v_cvt_f16_f32_e32 v29, v1
; SI-NEXT: v_cvt_f16_f32_e32 v28, v0
; SI-NEXT: v_cvt_f16_f32_e32 v27, v3
@@ -3794,13 +3860,14 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v48, s23
; SI-NEXT: v_cvt_f16_f32_e32 v39, s22
; SI-NEXT: v_cvt_f16_f32_e32 v37, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
; SI-NEXT: v_cvt_f16_f32_e32 v33, s27
; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v31, s29
; SI-NEXT: v_cvt_f16_f32_e32 v30, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -3819,9 +3886,9 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v17
; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_or_b32_e32 v1, v38, v1
-; SI-NEXT: v_or_b32_e32 v2, v35, v2
+; SI-NEXT: v_or_b32_e32 v2, v34, v2
; SI-NEXT: v_or_b32_e32 v3, v39, v3
-; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v4, v35, v4
; SI-NEXT: v_or_b32_e32 v5, v32, v5
; SI-NEXT: v_or_b32_e32 v6, v30, v6
; SI-NEXT: v_or_b32_e32 v7, v28, v7
@@ -3844,7 +3911,7 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v35
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v34
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v36
@@ -3867,7 +3934,7 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v35
; SI-NEXT: v_or_b32_e32 v3, v4, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5
; SI-NEXT: v_cvt_f32_f16_e32 v5, v33
@@ -3949,17 +4016,23 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v28f16_to_v14i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -4032,8 +4105,6 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4058,10 +4129,14 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v12, s28, v0 op_sel_hi:[1,0]
@@ -4078,8 +4153,6 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4107,12 +4180,15 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
@@ -4128,8 +4204,6 @@ define inreg <14 x i32> @bitcast_v28f16_to_v14i32_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4282,10 +4356,14 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB21_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_4
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -4301,8 +4379,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_3:
-; SI-NEXT: s_branch .LBB21_2
; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -4327,10 +4403,14 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB21_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_4
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -4346,8 +4426,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_3:
-; VI-NEXT: s_branch .LBB21_2
; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4372,10 +4450,14 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_4
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -4391,8 +4473,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_3:
-; GFX9-NEXT: s_branch .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4420,12 +4500,15 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
@@ -4441,8 +4524,6 @@ define inreg <7 x i64> @bitcast_v14f32_to_v7i64_scalar(<14 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4606,10 +4687,14 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s28, s28, 3
; SI-NEXT: s_addc_u32 s29, s29, 0
; SI-NEXT: s_add_u32 s26, s26, 3
@@ -4624,7 +4709,7 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; SI-NEXT: s_addc_u32 s19, s19, 0
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -4640,18 +4725,20 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v7i64_to_v14f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s28, s28, 3
; VI-NEXT: s_addc_u32 s29, s29, 0
; VI-NEXT: s_add_u32 s26, s26, 3
@@ -4666,7 +4753,7 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -4682,18 +4769,20 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v7i64_to_v14f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -4708,7 +4797,7 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -4724,19 +4813,20 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v7i64_to_v14f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s24, s24, 3
; GFX11-NEXT: s_addc_u32 s25, s25, 0
; GFX11-NEXT: s_add_u32 s22, s22, 3
@@ -4751,7 +4841,7 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB23_3: ; %end
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -4761,8 +4851,6 @@ define inreg <14 x float> @bitcast_v7i64_to_v14f32_scalar(<7 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4905,10 +4993,14 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB25_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_4
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v13, s29, 1.0
; SI-NEXT: v_add_f32_e64 v12, s28, 1.0
; SI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -4924,8 +5016,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_3:
-; SI-NEXT: s_branch .LBB25_2
; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -4950,10 +5040,14 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -4969,8 +5063,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4995,10 +5087,14 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -5014,8 +5110,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5043,12 +5137,15 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
-; GFX11-NEXT: .LBB25_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
@@ -5064,8 +5161,6 @@ define inreg <7 x double> @bitcast_v14f32_to_v7f64_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -5197,10 +5292,14 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB27_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_4
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -5209,8 +5308,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_3:
-; SI-NEXT: s_branch .LBB27_2
; SI-NEXT: .LBB27_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -5235,10 +5332,14 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -5247,8 +5348,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5273,10 +5372,14 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -5285,8 +5388,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5314,12 +5415,15 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -5328,8 +5432,6 @@ define inreg <14 x float> @bitcast_v7f64_to_v14f32_scalar(<7 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -5541,6 +5643,7 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB29_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s28
@@ -5610,7 +5713,8 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -5640,10 +5744,14 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -5659,8 +5767,6 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5685,10 +5791,14 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -5704,8 +5814,6 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5733,12 +5841,15 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
@@ -5754,8 +5865,6 @@ define inreg <28 x i16> @bitcast_v14f32_to_v28i16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -6072,6 +6181,7 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v12
; SI-NEXT: v_mov_b32_e32 v17, v10
; SI-NEXT: v_mov_b32_e32 v18, v8
@@ -6079,7 +6189,7 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, v4
; SI-NEXT: v_mov_b32_e32 v21, v2
; SI-NEXT: v_mov_b32_e32 v22, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v5
@@ -6207,17 +6317,23 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v28i16_to_v14f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s29, 3
; VI-NEXT: s_and_b32 s4, s29, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -6288,7 +6404,7 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -6304,18 +6420,20 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v28i16_to_v14f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
@@ -6331,8 +6449,6 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6360,12 +6476,15 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
@@ -6381,8 +6500,6 @@ define inreg <14 x float> @bitcast_v28i16_to_v14f32_scalar(<28 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -6682,6 +6799,7 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s29, 16
@@ -6815,17 +6933,23 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v14f32_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v13, s29, 1.0
; VI-NEXT: v_add_f32_e64 v12, s28, 1.0
; VI-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -6841,8 +6965,6 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6867,10 +6989,14 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX9-NEXT: v_add_f32_e64 v12, s28, 1.0
; GFX9-NEXT: v_add_f32_e64 v11, s27, 1.0
@@ -6886,8 +7012,6 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6915,12 +7039,15 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
; GFX11-NEXT: v_add_f32_e64 v12, s24, 1.0
; GFX11-NEXT: v_add_f32_e64 v11, s23, 1.0
@@ -6936,8 +7063,6 @@ define inreg <28 x half> @bitcast_v14f32_to_v28f16_scalar(<14 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -7324,7 +7449,7 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v49, s19
; SI-NEXT: v_cvt_f16_f32_e32 v38, s18
; SI-NEXT: v_cvt_f16_f32_e32 v36, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s20
; SI-NEXT: v_cvt_f16_f32_e32 v29, v1
; SI-NEXT: v_cvt_f16_f32_e32 v28, v0
; SI-NEXT: v_cvt_f16_f32_e32 v27, v3
@@ -7342,13 +7467,14 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v48, s23
; SI-NEXT: v_cvt_f16_f32_e32 v39, s22
; SI-NEXT: v_cvt_f16_f32_e32 v37, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
; SI-NEXT: v_cvt_f16_f32_e32 v33, s27
; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v31, s29
; SI-NEXT: v_cvt_f16_f32_e32 v30, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -7367,9 +7493,9 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v17
; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_or_b32_e32 v1, v38, v1
-; SI-NEXT: v_or_b32_e32 v2, v35, v2
+; SI-NEXT: v_or_b32_e32 v2, v34, v2
; SI-NEXT: v_or_b32_e32 v3, v39, v3
-; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v4, v35, v4
; SI-NEXT: v_or_b32_e32 v5, v32, v5
; SI-NEXT: v_or_b32_e32 v6, v30, v6
; SI-NEXT: v_or_b32_e32 v7, v28, v7
@@ -7392,7 +7518,7 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v35
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v34
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v36
@@ -7415,7 +7541,7 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v35
; SI-NEXT: v_or_b32_e32 v3, v4, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5
; SI-NEXT: v_cvt_f32_f16_e32 v5, v33
@@ -7497,17 +7623,23 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v28f16_to_v14f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -7580,8 +7712,6 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7606,10 +7736,14 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v12, s28, v0 op_sel_hi:[1,0]
@@ -7626,8 +7760,6 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7655,12 +7787,15 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
@@ -7676,8 +7811,6 @@ define inreg <14 x float> @bitcast_v28f16_to_v14f32_scalar(<28 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -7841,10 +7974,14 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
; SI-NEXT: s_add_u32 s18, s18, 3
@@ -7859,7 +7996,7 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s27, s27, 0
; SI-NEXT: s_add_u32 s28, s28, 3
; SI-NEXT: s_addc_u32 s29, s29, 0
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
@@ -7875,18 +8012,20 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v7i64_to_v7f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
; VI-NEXT: s_add_u32 s18, s18, 3
@@ -7901,7 +8040,7 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s27, s27, 0
; VI-NEXT: s_add_u32 s28, s28, 3
; VI-NEXT: s_addc_u32 s29, s29, 0
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -7917,18 +8056,20 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v7i64_to_v7f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
; GFX9-NEXT: s_add_u32 s18, s18, 3
@@ -7943,7 +8084,7 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s27, s27, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -7959,19 +8100,20 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v7i64_to_v7f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
@@ -7986,7 +8128,7 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s23, s23, 0
; GFX11-NEXT: s_add_u32 s24, s24, 3
; GFX11-NEXT: s_addc_u32 s25, s25, 0
-; GFX11-NEXT: .LBB37_3: ; %end
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7995,8 +8137,6 @@ define inreg <7 x double> @bitcast_v7i64_to_v7f64_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8118,10 +8258,14 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB39_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_4
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; SI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -8130,8 +8274,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; SI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_3:
-; SI-NEXT: s_branch .LBB39_2
; SI-NEXT: .LBB39_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -8156,10 +8298,14 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB39_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_4
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -8168,8 +8314,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_3:
-; VI-NEXT: s_branch .LBB39_2
; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8194,10 +8338,14 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], s[20:21], 1.0
@@ -8206,8 +8354,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8235,12 +8381,15 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
@@ -8249,8 +8398,6 @@ define inreg <7 x i64> @bitcast_v7f64_to_v7i64_scalar(<7 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -8473,6 +8620,7 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s28
@@ -8571,17 +8719,23 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v7i64_to_v28i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_3
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s28, s28, 3
; VI-NEXT: s_addc_u32 s29, s29, 0
; VI-NEXT: s_add_u32 s26, s26, 3
@@ -8596,7 +8750,7 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB41_3: ; %end
+; VI-NEXT: .LBB41_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -8612,18 +8766,20 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v7i64_to_v28i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_3
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -8638,7 +8794,7 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB41_3: ; %end
+; GFX9-NEXT: .LBB41_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -8654,19 +8810,20 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v7i64_to_v28i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s24, s24, 3
; GFX11-NEXT: s_addc_u32 s25, s25, 0
; GFX11-NEXT: s_add_u32 s22, s22, 3
@@ -8681,7 +8838,7 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB41_3: ; %end
+; GFX11-NEXT: .LBB41_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -8691,8 +8848,6 @@ define inreg <28 x i16> @bitcast_v7i64_to_v28i16_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_4:
-; GFX11-NEXT: s_branch .LBB41_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8999,6 +9154,7 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v12
; SI-NEXT: v_mov_b32_e32 v17, v10
; SI-NEXT: v_mov_b32_e32 v18, v8
@@ -9006,7 +9162,7 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v20, v4
; SI-NEXT: v_mov_b32_e32 v21, v2
; SI-NEXT: v_mov_b32_e32 v22, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v5
@@ -9134,17 +9290,23 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v28i16_to_v7i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_3
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s29, 3
; VI-NEXT: s_and_b32 s4, s29, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -9215,7 +9377,7 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB43_3: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -9231,18 +9393,20 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v28i16_to_v7i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
@@ -9258,8 +9422,6 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9287,12 +9449,15 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
@@ -9308,8 +9473,6 @@ define inreg <7 x i64> @bitcast_v28i16_to_v7i64_scalar(<28 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -9620,6 +9783,7 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s29, 16
@@ -9753,17 +9917,23 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v7i64_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_3
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s28, s28, 3
; VI-NEXT: s_addc_u32 s29, s29, 0
; VI-NEXT: s_add_u32 s26, s26, 3
@@ -9778,7 +9948,7 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB45_3: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -9794,18 +9964,20 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v7i64_to_v28f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_3
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -9820,7 +9992,7 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB45_3: ; %end
+; GFX9-NEXT: .LBB45_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
@@ -9836,19 +10008,20 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v7i64_to_v28f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s24, s24, 3
; GFX11-NEXT: s_addc_u32 s25, s25, 0
; GFX11-NEXT: s_add_u32 s22, s22, 3
@@ -9863,7 +10036,7 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB45_3: ; %end
+; GFX11-NEXT: .LBB45_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -9873,8 +10046,6 @@ define inreg <28 x half> @bitcast_v7i64_to_v28f16_scalar(<7 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_4:
-; GFX11-NEXT: s_branch .LBB45_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10251,7 +10422,7 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v49, s19
; SI-NEXT: v_cvt_f16_f32_e32 v38, s18
; SI-NEXT: v_cvt_f16_f32_e32 v36, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s20
; SI-NEXT: v_cvt_f16_f32_e32 v29, v1
; SI-NEXT: v_cvt_f16_f32_e32 v28, v0
; SI-NEXT: v_cvt_f16_f32_e32 v27, v3
@@ -10269,13 +10440,14 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v48, s23
; SI-NEXT: v_cvt_f16_f32_e32 v39, s22
; SI-NEXT: v_cvt_f16_f32_e32 v37, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
; SI-NEXT: v_cvt_f16_f32_e32 v33, s27
; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v31, s29
; SI-NEXT: v_cvt_f16_f32_e32 v30, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -10294,9 +10466,9 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v17
; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_or_b32_e32 v1, v38, v1
-; SI-NEXT: v_or_b32_e32 v2, v35, v2
+; SI-NEXT: v_or_b32_e32 v2, v34, v2
; SI-NEXT: v_or_b32_e32 v3, v39, v3
-; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v4, v35, v4
; SI-NEXT: v_or_b32_e32 v5, v32, v5
; SI-NEXT: v_or_b32_e32 v6, v30, v6
; SI-NEXT: v_or_b32_e32 v7, v28, v7
@@ -10319,7 +10491,7 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v35
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v34
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v36
@@ -10342,7 +10514,7 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v35
; SI-NEXT: v_or_b32_e32 v3, v4, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5
; SI-NEXT: v_cvt_f32_f16_e32 v5, v33
@@ -10424,17 +10596,23 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v28f16_to_v7i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -10507,8 +10685,6 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10533,10 +10709,14 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v12, s28, v0 op_sel_hi:[1,0]
@@ -10553,8 +10733,6 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10582,12 +10760,15 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
@@ -10603,8 +10784,6 @@ define inreg <7 x i64> @bitcast_v28f16_to_v7i64_scalar(<28 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -10807,6 +10986,7 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s28
@@ -10869,7 +11049,8 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v5, s19
; SI-NEXT: v_mov_b32_e32 v9, s21
@@ -10914,10 +11095,14 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_4
-; VI-NEXT: .LBB49_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB49_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB49_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -10926,8 +11111,6 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_3:
-; VI-NEXT: s_branch .LBB49_2
; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10952,10 +11135,14 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
-; GFX9-NEXT: .LBB49_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB49_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -10964,8 +11151,6 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10993,12 +11178,15 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB49_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -11007,8 +11195,6 @@ define inreg <28 x i16> @bitcast_v7f64_to_v28i16_scalar(<7 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -11325,6 +11511,7 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v16, v12
; SI-NEXT: v_mov_b32_e32 v17, v10
; SI-NEXT: v_mov_b32_e32 v18, v8
@@ -11332,7 +11519,7 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v20, v4
; SI-NEXT: v_mov_b32_e32 v21, v2
; SI-NEXT: v_mov_b32_e32 v22, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v5
@@ -11460,17 +11647,23 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v28i16_to_v7f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB51_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_3
-; VI-NEXT: .LBB51_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB51_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB51_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s29, 3
; VI-NEXT: s_and_b32 s4, s29, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -11541,7 +11734,7 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB51_3: ; %end
+; VI-NEXT: .LBB51_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -11557,18 +11750,20 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_4:
-; VI-NEXT: s_branch .LBB51_2
;
; GFX9-LABEL: bitcast_v28i16_to_v7f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
-; GFX9-NEXT: .LBB51_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB51_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
@@ -11584,8 +11779,6 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -11613,12 +11806,15 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB51_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
@@ -11634,8 +11830,6 @@ define inreg <7 x double> @bitcast_v28i16_to_v7f64_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -11903,6 +12097,7 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB53_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s29, 16
@@ -12029,17 +12224,23 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v7f64_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_4
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; VI-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; VI-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -12048,8 +12249,6 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; VI-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_3:
-; VI-NEXT: s_branch .LBB53_2
; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -12074,10 +12273,14 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[12:13], s[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], s[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[8:9], s[24:25], 1.0
@@ -12086,8 +12289,6 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[2:3], s[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12115,12 +12316,15 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], s[20:21], 1.0
@@ -12129,8 +12333,6 @@ define inreg <28 x half> @bitcast_v7f64_to_v28f16_scalar(<7 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -12517,7 +12719,7 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v49, s19
; SI-NEXT: v_cvt_f16_f32_e32 v38, s18
; SI-NEXT: v_cvt_f16_f32_e32 v36, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v35, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v34, s20
; SI-NEXT: v_cvt_f16_f32_e32 v29, v1
; SI-NEXT: v_cvt_f16_f32_e32 v28, v0
; SI-NEXT: v_cvt_f16_f32_e32 v27, v3
@@ -12535,13 +12737,14 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v48, s23
; SI-NEXT: v_cvt_f16_f32_e32 v39, s22
; SI-NEXT: v_cvt_f16_f32_e32 v37, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v34, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v35, s24
; SI-NEXT: v_cvt_f16_f32_e32 v33, s27
; SI-NEXT: v_cvt_f16_f32_e32 v32, s26
; SI-NEXT: v_cvt_f16_f32_e32 v31, s29
; SI-NEXT: v_cvt_f16_f32_e32 v30, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -12560,9 +12763,9 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v17
; SI-NEXT: v_or_b32_e32 v0, v50, v0
; SI-NEXT: v_or_b32_e32 v1, v38, v1
-; SI-NEXT: v_or_b32_e32 v2, v35, v2
+; SI-NEXT: v_or_b32_e32 v2, v34, v2
; SI-NEXT: v_or_b32_e32 v3, v39, v3
-; SI-NEXT: v_or_b32_e32 v4, v34, v4
+; SI-NEXT: v_or_b32_e32 v4, v35, v4
; SI-NEXT: v_or_b32_e32 v5, v32, v5
; SI-NEXT: v_or_b32_e32 v6, v30, v6
; SI-NEXT: v_or_b32_e32 v7, v28, v7
@@ -12585,7 +12788,7 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v35
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v34
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v2
; SI-NEXT: v_cvt_f32_f16_e32 v2, v36
@@ -12608,7 +12811,7 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v35
; SI-NEXT: v_or_b32_e32 v3, v4, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v5
; SI-NEXT: v_cvt_f32_f16_e32 v5, v33
@@ -12690,17 +12893,23 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v28f16_to_v7f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s29, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -12773,8 +12982,6 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v14
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -12799,10 +13006,14 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v12, s28, v0 op_sel_hi:[1,0]
@@ -12819,8 +13030,6 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12848,12 +13057,15 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
@@ -12869,8 +13081,6 @@ define inreg <7 x double> @bitcast_v28f16_to_v7f64_scalar(<28 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -13236,6 +13446,7 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v38, v13
; SI-NEXT: v_mov_b32_e32 v37, v12
; SI-NEXT: v_mov_b32_e32 v36, v11
@@ -13250,7 +13461,7 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v49, v2
; SI-NEXT: v_mov_b32_e32 v48, v1
; SI-NEXT: v_mov_b32_e32 v39, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -13370,17 +13581,23 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v28i16_to_v28f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s6, s17, 0xffff0000
@@ -13451,7 +13668,7 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -13467,18 +13684,20 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v28i16_to_v28f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s28, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v11, s27, 3 op_sel_hi:[1,0]
@@ -13494,8 +13713,6 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13523,12 +13740,15 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v12, s24, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v11, s23, 3 op_sel_hi:[1,0]
@@ -13544,8 +13764,6 @@ define inreg <28 x half> @bitcast_v28i16_to_v28f16_scalar(<28 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -13902,10 +14120,14 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v12, s28
; SI-NEXT: v_cvt_f16_f32_e32 v13, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
; SI-NEXT: v_cvt_f32_f16_e32 v26, v26
; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
@@ -14025,20 +14247,22 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v17, v18, v17, 16
; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16
; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v28f16_to_v28i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s5, s28, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v2, s5
@@ -14111,8 +14335,6 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v0, v1
; VI-NEXT: v_or_b32_e32 v0, v14, v15
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14137,10 +14359,14 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v13, s29, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v12, s28, v0 op_sel_hi:[1,0]
@@ -14157,8 +14383,6 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14186,12 +14410,15 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s26, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s24 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v11, 0x200, s23 op_sel_hi:[0,1]
@@ -14207,8 +14434,6 @@ define inreg <28 x i16> @bitcast_v28f16_to_v28i16_scalar(<28 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
index 68312b8..030a491 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.48bit.ll
@@ -246,6 +246,7 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB1_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v3
@@ -274,16 +275,22 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_branch .LBB1_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB1_2
+; SI-NEXT: s_branch .LBB1_3
;
; VI-LABEL: bitcast_v3bf16_to_v3f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_4
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -314,8 +321,6 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v2, 0x7fc00000
; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_3:
-; VI-NEXT: s_branch .LBB1_2
; VI-NEXT: .LBB1_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -325,10 +330,14 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_4
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s17, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -362,8 +371,6 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v1, s4, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_3:
-; GFX9-NEXT: s_branch .LBB1_2
; GFX9-NEXT: .LBB1_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -373,12 +380,15 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s2, s0, 16
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -417,8 +427,6 @@ define inreg <3 x half> @bitcast_v3bf16_to_v3f16_scalar(<3 x bfloat> inreg %a, i
; GFX11-NEXT: v_lshl_or_b32 v1, 0x7fc0, 16, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v2, 16, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: s_branch .LBB1_2
; GFX11-NEXT: .LBB1_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -556,6 +564,7 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v4, s17
; SI-NEXT: v_cvt_f16_f32_e32 v5, s18
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB3_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v3
@@ -581,16 +590,22 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_branch .LBB3_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB3_2
+; SI-NEXT: s_branch .LBB3_3
;
; VI-LABEL: bitcast_v3f16_to_v3bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v3, s4
@@ -600,8 +615,6 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v0, v2, v0
; VI-NEXT: v_or_b32_e32 v1, 0x7e000000, v1
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -611,16 +624,18 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -630,17 +645,18 @@ define inreg <3 x bfloat> @bitcast_v3f16_to_v3bf16_scalar(<3 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -900,6 +916,7 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v5
@@ -924,16 +941,22 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_v3bf16_to_v3i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_4
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -964,8 +987,6 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v2, 0x7fc00000
; VI-NEXT: v_or_b32_sdwa v1, v1, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_3:
-; VI-NEXT: s_branch .LBB5_2
; VI-NEXT: .LBB5_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -975,10 +996,14 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_4
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_lshl_b32 s4, s17, 16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -1012,8 +1037,6 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_b32_sdwa v1, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v1, s4, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_3:
-; GFX9-NEXT: s_branch .LBB5_2
; GFX9-NEXT: .LBB5_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1023,12 +1046,15 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_lshl_b32 s2, s0, 16
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -1064,8 +1090,6 @@ define inreg <3 x i16> @bitcast_v3bf16_to_v3i16_scalar(<3 x bfloat> inreg %a, i3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2)
; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v2, v0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: s_branch .LBB5_2
; GFX11-NEXT: .LBB5_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1187,6 +1211,7 @@ define inreg <3 x bfloat> @bitcast_v3i16_to_v3bf16_scalar(<3 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s6, s16, 16
@@ -1212,42 +1237,48 @@ define inreg <3 x bfloat> @bitcast_v3i16_to_v3bf16_scalar(<3 x i16> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v3i16_to_v3bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v3i16_to_v3bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_4
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_3:
-; GFX9-NEXT: s_branch .LBB7_2
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1257,17 +1288,18 @@ define inreg <3 x bfloat> @bitcast_v3i16_to_v3bf16_scalar(<3 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: s_branch .LBB7_2
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1389,10 +1421,14 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v1, s17
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v0, v0
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
@@ -1405,19 +1441,21 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v3f16_to_v3i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_4
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v3, s4
@@ -1427,8 +1465,6 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v0, v2, v0
; VI-NEXT: v_or_b32_e32 v1, 0x7e000000, v1
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_3:
-; VI-NEXT: s_branch .LBB9_2
; VI-NEXT: .LBB9_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1438,16 +1474,18 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_4
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_3:
-; GFX9-NEXT: s_branch .LBB9_2
; GFX9-NEXT: .LBB9_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1457,17 +1495,18 @@ define inreg <3 x i16> @bitcast_v3f16_to_v3i16_scalar(<3 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: s_branch .LBB9_2
; GFX11-NEXT: .LBB9_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1594,6 +1633,7 @@ define inreg <3 x half> @bitcast_v3i16_to_v3f16_scalar(<3 x i16> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -1613,42 +1653,48 @@ define inreg <3 x half> @bitcast_v3i16_to_v3f16_scalar(<3 x i16> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr0
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v3i16_to_v3f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v3i16_to_v3f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1658,17 +1704,18 @@ define inreg <3 x half> @bitcast_v3i16_to_v3f16_scalar(<3 x i16> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
index 5aac06a..e446df0 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.512bit.ll
@@ -144,8 +144,9 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; SI-LABEL: bitcast_v16i32_to_v16f32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -161,12 +162,15 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v13
@@ -183,16 +187,15 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v16i32_to_v16f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -208,12 +211,15 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -230,16 +236,15 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v16i32_to_v16f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -255,12 +260,15 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -277,21 +285,22 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v16i32_to_v16f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s27, s27, 3
; GFX11-NEXT: s_add_i32 s26, s26, 3
; GFX11-NEXT: s_add_i32 s25, s25, 3
@@ -308,7 +317,7 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -319,8 +328,6 @@ define inreg <16 x float> @bitcast_v16i32_to_v16f32_scalar(<16 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -468,8 +475,9 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; SI-LABEL: bitcast_v16f32_to_v16i32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -485,12 +493,15 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
; SI-NEXT: v_add_f32_e32 v14, 1.0, v14
; SI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -507,16 +518,15 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v16f32_to_v16i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -532,12 +542,15 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -554,16 +567,15 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v16f32_to_v16i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -579,12 +591,15 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -601,10 +616,8 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v16f32_to_v16i32_scalar:
; GFX11: ; %bb.0:
@@ -614,12 +627,15 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
@@ -637,8 +653,6 @@ define inreg <16 x i32> @bitcast_v16f32_to_v16i32_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -804,8 +818,9 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; SI-LABEL: bitcast_v16i32_to_v8i64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -821,12 +836,15 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v13
@@ -843,16 +861,15 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v16i32_to_v8i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -868,12 +885,15 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -890,16 +910,15 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v16i32_to_v8i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -915,12 +934,15 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -937,21 +959,22 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v16i32_to_v8i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s27, s27, 3
; GFX11-NEXT: s_add_i32 s26, s26, 3
; GFX11-NEXT: s_add_i32 s25, s25, 3
@@ -968,7 +991,7 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -979,8 +1002,6 @@ define inreg <8 x i64> @bitcast_v16i32_to_v8i64_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1140,8 +1161,9 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; SI-LABEL: bitcast_v8i64_to_v16i32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -1157,12 +1179,15 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
; SI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12
@@ -1179,16 +1204,15 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v8i64_to_v16i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -1204,12 +1228,15 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -1226,16 +1253,15 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v8i64_to_v16i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -1251,12 +1277,15 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -1273,21 +1302,22 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v8i64_to_v16i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s26, s26, 3
; GFX11-NEXT: s_addc_u32 s27, s27, 0
; GFX11-NEXT: s_add_u32 s24, s24, 3
@@ -1304,7 +1334,7 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB7_3: ; %end
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1315,8 +1345,6 @@ define inreg <16 x i32> @bitcast_v8i64_to_v16i32_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1472,8 +1500,9 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; SI-LABEL: bitcast_v16i32_to_v8f64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -1489,12 +1518,15 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
; SI-NEXT: v_add_i32_e32 v13, vcc, 3, v13
@@ -1511,16 +1543,15 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v16i32_to_v8f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -1536,12 +1567,15 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -1558,16 +1592,15 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v16i32_to_v8f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -1583,12 +1616,15 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -1605,21 +1641,22 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v16i32_to_v8f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s27, s27, 3
; GFX11-NEXT: s_add_i32 s26, s26, 3
; GFX11-NEXT: s_add_i32 s25, s25, 3
@@ -1636,7 +1673,7 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1647,8 +1684,6 @@ define inreg <8 x double> @bitcast_v16i32_to_v8f64_scalar(<16 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1772,8 +1807,9 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; SI-LABEL: bitcast_v8f64_to_v16i32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -1789,12 +1825,15 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; SI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -1803,16 +1842,15 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v8f64_to_v16i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -1828,12 +1866,15 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -1842,16 +1883,15 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v8f64_to_v16i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -1867,12 +1907,15 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -1881,10 +1924,8 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v8f64_to_v16i32_scalar:
; GFX11: ; %bb.0:
@@ -1894,12 +1935,15 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -1909,8 +1953,6 @@ define inreg <16 x i32> @bitcast_v8f64_to_v16i32_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -2145,6 +2187,7 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v30, v1
; SI-NEXT: v_mov_b32_e32 v28, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -2160,8 +2203,8 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v20, s26
; SI-NEXT: v_mov_b32_e32 v22, s27
; SI-NEXT: v_mov_b32_e32 v24, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16
@@ -2233,13 +2276,16 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v16i32_to_v32i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -2255,12 +2301,15 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -2277,16 +2326,15 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v16i32_to_v32i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -2302,12 +2350,15 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -2324,21 +2375,22 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v16i32_to_v32i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s27, s27, 3
; GFX11-NEXT: s_add_i32 s26, s26, 3
; GFX11-NEXT: s_add_i32 s25, s25, 3
@@ -2355,7 +2407,7 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -2366,8 +2418,6 @@ define inreg <32 x i16> @bitcast_v16i32_to_v32i16_scalar(<16 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2710,6 +2760,7 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, v14
; SI-NEXT: v_mov_b32_e32 v25, v12
; SI-NEXT: v_mov_b32_e32 v19, v10
@@ -2718,7 +2769,7 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v22, v4
; SI-NEXT: v_mov_b32_e32 v23, v2
; SI-NEXT: v_mov_b32_e32 v24, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v5
@@ -2860,19 +2911,25 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v32i16_to_v16i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: v_readfirstlane_b32 s7, v1
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s7, 3
; VI-NEXT: s_and_b32 s4, s7, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -2953,7 +3010,7 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -2971,14 +3028,13 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v14, s6
; VI-NEXT: v_mov_b32_e32 v15, s7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v32i16_to_v16i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -2994,12 +3050,15 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_3
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -3016,10 +3075,8 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB15_3: ; %end
+; GFX9-NEXT: .LBB15_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_4:
-; GFX9-NEXT: s_branch .LBB15_2
;
; GFX11-LABEL: bitcast_v32i16_to_v16i32_scalar:
; GFX11: ; %bb.0:
@@ -3029,12 +3086,15 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -3052,8 +3112,6 @@ define inreg <16 x i32> @bitcast_v32i16_to_v16i32_scalar(<32 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -3387,14 +3445,15 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s7, v1
+; SI-NEXT: v_readfirstlane_b32 s7, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v31, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v31, s4
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v29, s4
; SI-NEXT: s_lshr_b32 s4, s29, 16
; SI-NEXT: v_cvt_f32_f16_e32 v27, s4
@@ -3424,8 +3483,8 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v30, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v28, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v30, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v28, s7
; SI-NEXT: v_cvt_f32_f16_e32 v26, s29
; SI-NEXT: v_cvt_f32_f16_e32 v24, s28
; SI-NEXT: v_cvt_f32_f16_e32 v22, s27
@@ -3456,8 +3515,8 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s27, s27, 3
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_add_i32 s29, s29, 3
-; SI-NEXT: s_add_i32 s6, s6, 3
; SI-NEXT: s_add_i32 s7, s7, 3
+; SI-NEXT: s_add_i32 s6, s6, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s8, s18, 16
@@ -3472,10 +3531,10 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s41, s27, 16
; SI-NEXT: s_lshr_b32 s42, s28, 16
; SI-NEXT: s_lshr_b32 s43, s29, 16
-; SI-NEXT: s_lshr_b32 s44, s6, 16
-; SI-NEXT: s_lshr_b32 s45, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v30, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v28, s6
+; SI-NEXT: s_lshr_b32 s44, s7, 16
+; SI-NEXT: s_lshr_b32 s45, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v30, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v28, s7
; SI-NEXT: v_cvt_f32_f16_e32 v26, s29
; SI-NEXT: v_cvt_f32_f16_e32 v24, s28
; SI-NEXT: v_cvt_f32_f16_e32 v22, s27
@@ -3541,13 +3600,16 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v16i32_to_v32f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -3563,12 +3625,15 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -3585,16 +3650,15 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v16i32_to_v32f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -3610,12 +3674,15 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -3632,21 +3699,22 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v16i32_to_v32f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s27, s27, 3
; GFX11-NEXT: s_add_i32 s26, s26, 3
; GFX11-NEXT: s_add_i32 s25, s25, 3
@@ -3663,7 +3731,7 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -3674,8 +3742,6 @@ define inreg <32 x half> @bitcast_v16i32_to_v32f16_scalar(<16 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4138,6 +4204,7 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v35, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -4308,13 +4375,16 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v32f16_to_v16i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -4330,12 +4400,15 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_3
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x200
; VI-NEXT: v_add_f16_sdwa v17, v15, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -4385,16 +4458,15 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v0, 0x200, v0
; VI-NEXT: v_or_b32_e32 v1, v1, v17
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: .LBB19_3: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-LABEL: bitcast_v32f16_to_v16i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -4410,12 +4482,15 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_3
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -4433,10 +4508,8 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v2, v2, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, v1, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, v0, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB19_3: ; %end
+; GFX9-NEXT: .LBB19_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_4:
-; GFX9-NEXT: s_branch .LBB19_2
;
; GFX11-LABEL: bitcast_v32f16_to_v16i32_scalar:
; GFX11: ; %bb.0:
@@ -4446,12 +4519,15 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -4469,8 +4545,6 @@ define inreg <16 x i32> @bitcast_v32f16_to_v16i32_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -4772,9 +4846,10 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s78, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s78, v0
; SI-NEXT: v_readfirstlane_b32 s79, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s79, 0xffff0000
@@ -4926,13 +5001,16 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_v16i32_to_v32bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -4948,12 +5026,15 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
@@ -4970,16 +5051,15 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v16i32_to_v32bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -4995,12 +5075,15 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
; GFX9-NEXT: v_add_u32_e32 v14, 3, v14
; GFX9-NEXT: v_add_u32_e32 v13, 3, v13
@@ -5017,21 +5100,22 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v16i32_to_v32bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s27, s27, 3
; GFX11-NEXT: s_add_i32 s26, s26, 3
; GFX11-NEXT: s_add_i32 s25, s25, 3
@@ -5048,7 +5132,7 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB21_3: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5059,8 +5143,6 @@ define inreg <32 x bfloat> @bitcast_v16i32_to_v32bf16_scalar(<16 x i32> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6488,6 +6570,7 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e32 v19, 1.0, v14
; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17
; SI-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mul_f32_e64 v54, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v55, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v52, 1.0, s21
@@ -6636,7 +6719,9 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB23_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB23_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB23_2
+; SI-NEXT: s_branch .LBB23_3
;
; VI-LABEL: bitcast_v32bf16_to_v16i32_scalar:
; VI: ; %bb.0:
@@ -6644,16 +6729,20 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v19, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; VI-NEXT: v_writelane_b32 v19, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v19, s31, 1
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -6944,8 +7033,6 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v16, 16
; VI-NEXT: s_branch .LBB23_5
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6978,16 +7065,20 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v20, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_writelane_b32 v20, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v20, s31, 1
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -7295,8 +7386,6 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
; GFX9-NEXT: s_branch .LBB23_5
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7331,12 +7420,15 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s0, s27, 16
; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
@@ -7665,8 +7757,6 @@ define inreg <16 x i32> @bitcast_v32bf16_to_v16i32_scalar(<32 x bfloat> inreg %a
; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -9396,9 +9486,10 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: v_readfirstlane_b32 s6, v2
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v9, s26
@@ -9775,7 +9866,9 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
+; SI-NEXT: s_branch .LBB25_3
;
; VI-LABEL: bitcast_v16i32_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -9804,8 +9897,9 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_writelane_b32 v4, s66, 18
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: s_mov_b64 s[46:47], -1
; VI-NEXT: v_writelane_b32 v4, s67, 19
; VI-NEXT: s_cbranch_scc0 .LBB25_4
; VI-NEXT: ; %bb.1: ; %cmp.false
@@ -10189,7 +10283,9 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr58
; VI-NEXT: ; implicit-def: $sgpr57
; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB25_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; VI-NEXT: s_cbranch_vccz .LBB25_2
+; VI-NEXT: s_branch .LBB25_3
;
; GFX9-LABEL: bitcast_v16i32_to_v64i8_scalar:
; GFX9: ; %bb.0:
@@ -10214,8 +10310,9 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; GFX9-NEXT: v_writelane_b32 v4, s54, 14
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[46:47], -1
; GFX9-NEXT: v_writelane_b32 v4, s55, 15
; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
@@ -10580,7 +10677,9 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr58
; GFX9-NEXT: ; implicit-def: $sgpr57
; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB25_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; GFX9-NEXT: s_cbranch_vccz .LBB25_2
+; GFX9-NEXT: s_branch .LBB25_3
;
; GFX11-LABEL: bitcast_v16i32_to_v64i8_scalar:
; GFX11: ; %bb.0:
@@ -10590,7 +10689,7 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v17, s30, 0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 vcc_lo, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: v_writelane_b32 v17, s31, 1
; GFX11-NEXT: v_writelane_b32 v17, s34, 2
; GFX11-NEXT: v_writelane_b32 v17, s35, 3
@@ -10601,6 +10700,7 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v17, s48, 8
; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[26:27], 24
; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b32 s43, s27, 16
; GFX11-NEXT: s_lshr_b32 s44, s27, 8
@@ -10641,7 +10741,6 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s38, s1, 8
; GFX11-NEXT: s_lshr_b32 s39, s0, 16
; GFX11-NEXT: s_lshr_b32 s48, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[26:27], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[22:23], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
@@ -10649,8 +10748,7 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[28:29], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[40:41], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
+; GFX11-NEXT: s_cbranch_execnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
@@ -10937,7 +11035,9 @@ define inreg <64 x i8> @bitcast_v16i32_to_v64i8_scalar(<16 x i32> inreg %a, i32
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB25_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB25_2
+; GFX11-NEXT: s_branch .LBB25_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13471,13 +13571,13 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:36
@@ -13488,8 +13588,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
@@ -13506,23 +13607,22 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v25
; SI-NEXT: v_lshlrev_b32_e32 v45, 8, v27
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v29
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2
; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v50
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39
+; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v37
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43
; SI-NEXT: s_waitcnt vmcnt(4)
@@ -13530,7 +13630,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_cbranch_scc0 .LBB27_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -13585,7 +13685,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_or_b32_e32 v10, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_or_b32_e32 v0, v0, v23
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -13594,14 +13694,12 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
-; SI-NEXT: v_mov_b32_e32 v24, v26
-; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v22, v46
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -13616,8 +13714,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v13, v1
; SI-NEXT: v_mov_b32_e32 v62, v58
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v46, v45
+; SI-NEXT: v_mov_b32_e32 v47, v45
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
@@ -13630,19 +13727,22 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v1
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: v_or_b32_e32 v1, v42, v1
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
; SI-NEXT: v_or_b32_e32 v0, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_mov_b32_e32 v30, v48
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v24, v26
+; SI-NEXT: v_mov_b32_e32 v26, v28
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v1
; SI-NEXT: s_and_b32 s4, s28, 0xff
@@ -13703,7 +13803,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: v_or_b32_e32 v1, v50, v1
; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -13738,9 +13838,8 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -13784,7 +13883,8 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
; SI-NEXT: s_add_i32 s5, s5, 0x3000000
; SI-NEXT: s_add_i32 s6, s6, 0x3000000
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -13801,32 +13901,34 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22
-; SI-NEXT: v_or_b32_e32 v0, v47, v0
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v22, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26
-; SI-NEXT: v_or_b32_e32 v0, v46, v0
+; SI-NEXT: v_or_b32_e32 v0, v47, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: v_or_b32_e32 v1, v46, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -13835,9 +13937,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v25, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38
; SI-NEXT: v_or_b32_e32 v0, v23, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -13860,18 +13962,17 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54
-; SI-NEXT: v_or_b32_e32 v0, v48, v0
+; SI-NEXT: v_or_b32_e32 v0, v28, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v48, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -13882,7 +13983,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: .LBB27_3: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
@@ -13903,23 +14003,22 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_mov_b32_e32 v43, v6
; SI-NEXT: v_mov_b32_e32 v29, v8
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_mov_b32_e32 v36, v12
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
; SI-NEXT: v_mov_b32_e32 v24, v26
; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v30, v48
; SI-NEXT: v_mov_b32_e32 v39, v40
; SI-NEXT: v_mov_b32_e32 v41, v3
; SI-NEXT: v_mov_b32_e32 v40, v5
@@ -13929,20 +14028,22 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v57, v7
; SI-NEXT: v_mov_b32_e32 v59, v56
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
+; SI-NEXT: v_mov_b32_e32 v22, v46
; SI-NEXT: v_mov_b32_e32 v56, v9
-; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v47, v45
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
; SI-NEXT: v_mov_b32_e32 v19, v17
; SI-NEXT: v_mov_b32_e32 v17, v13
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB27_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB27_2
+; SI-NEXT: s_branch .LBB27_3
;
; VI-LABEL: bitcast_v64i8_to_v16i32_scalar:
; VI: ; %bb.0:
@@ -13964,16 +14065,16 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v37, v30
-; VI-NEXT: v_mov_b32_e32 v61, v28
+; VI-NEXT: v_mov_b32_e32 v32, v28
; VI-NEXT: v_mov_b32_e32 v31, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -13986,8 +14087,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -14003,12 +14105,12 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35
@@ -14024,49 +14126,48 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; VI-NEXT: s_cbranch_scc0 .LBB27_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v35, v4
+; VI-NEXT: v_mov_b32_e32 v36, v4
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v44, v2
+; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v63, v59
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v48
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v34, v39
+; VI-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v45, v25
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v43, v12
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -14076,8 +14177,8 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v26, v32
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
; VI-NEXT: v_mov_b32_e32 v19, v17
@@ -14087,17 +14188,18 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: v_mov_b32_e32 v29, v33
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v29, v61
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -14142,11 +14244,11 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v35
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v36
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -14164,11 +14266,13 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
@@ -14209,17 +14313,15 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
; VI-NEXT: s_add_i32 s6, s6, 0x3000000
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -14242,15 +14344,15 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
-; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62
+; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -14263,7 +14365,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -14298,34 +14400,34 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB27_4:
-; VI-NEXT: v_mov_b32_e32 v44, v2
-; VI-NEXT: v_mov_b32_e32 v34, v39
-; VI-NEXT: v_mov_b32_e32 v35, v4
-; VI-NEXT: v_mov_b32_e32 v29, v33
+; VI-NEXT: v_mov_b32_e32 v34, v48
+; VI-NEXT: v_mov_b32_e32 v35, v2
+; VI-NEXT: v_mov_b32_e32 v36, v4
+; VI-NEXT: v_mov_b32_e32 v29, v61
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_mov_b32_e32 v43, v12
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_mov_b32_e32 v16, v18
; VI-NEXT: v_mov_b32_e32 v18, v20
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
+; VI-NEXT: v_mov_b32_e32 v26, v32
; VI-NEXT: v_mov_b32_e32 v30, v37
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_mov_b32_e32 v45, v25
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
@@ -14333,10 +14435,12 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v17, v13
; VI-NEXT: v_mov_b32_e32 v37, v27
; VI-NEXT: v_mov_b32_e32 v27, v42
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB27_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB27_2
+; VI-NEXT: s_branch .LBB27_3
;
; GFX9-LABEL: bitcast_v64i8_to_v16i32_scalar:
; GFX9: ; %bb.0:
@@ -14358,16 +14462,16 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v37, v30
-; GFX9-NEXT: v_mov_b32_e32 v61, v28
+; GFX9-NEXT: v_mov_b32_e32 v32, v28
; GFX9-NEXT: v_mov_b32_e32 v31, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -14380,8 +14484,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -14397,14 +14502,14 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(22)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_waitcnt vmcnt(21)
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; GFX9-NEXT: s_waitcnt vmcnt(20)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; GFX9-NEXT: s_waitcnt vmcnt(16)
@@ -14423,49 +14528,48 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v63, v59
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v45, v25
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v43, v12
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -14475,8 +14579,8 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
; GFX9-NEXT: v_mov_b32_e32 v19, v17
@@ -14486,17 +14590,18 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s4, s4, s5
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
@@ -14540,11 +14645,11 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_lshl_b32 s6, s29, 8
; GFX9-NEXT: s_or_b32 s5, s6, s5
; GFX9-NEXT: v_add_u32_e32 v0, 3, v31
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v44
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v36
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s5, 0x300
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
@@ -14563,12 +14668,14 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
; GFX9-NEXT: v_add_u32_e32 v1, 3, v43
; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v44
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_and_b32 s5, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s17, 8
@@ -14609,18 +14716,16 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 3, v16
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 3, v18
-; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -14642,14 +14747,14 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v62
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v33
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v60
; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
@@ -14664,7 +14769,7 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v53
; GFX9-NEXT: v_add_u32_e32 v1, 3, v52
-; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -14698,34 +14803,34 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_mov_b32_e32 v43, v12
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_mov_b32_e32 v16, v18
; GFX9-NEXT: v_mov_b32_e32 v18, v20
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
; GFX9-NEXT: v_mov_b32_e32 v30, v37
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_mov_b32_e32 v45, v25
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
@@ -14733,10 +14838,12 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v17, v13
; GFX9-NEXT: v_mov_b32_e32 v37, v27
; GFX9-NEXT: v_mov_b32_e32 v27, v42
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB27_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB27_2
+; GFX9-NEXT: s_branch .LBB27_3
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v16i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -14777,7 +14884,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
@@ -14796,67 +14902,68 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB27_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -14922,10 +15029,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB27_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB27_3
; GFX11-TRUE16-NEXT: .LBB27_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -15121,7 +15227,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB27_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB27_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB27_2
+; GFX11-TRUE16-NEXT: s_branch .LBB27_3
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v16i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -15162,7 +15270,6 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(13)
@@ -15181,67 +15288,68 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB27_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -15307,10 +15415,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB27_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB27_3
; GFX11-FAKE16-NEXT: .LBB27_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -15506,7 +15613,9 @@ define inreg <16 x i32> @bitcast_v64i8_to_v16i32_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB27_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB27_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB27_2
+; GFX11-FAKE16-NEXT: s_branch .LBB27_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15654,8 +15763,9 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; SI-LABEL: bitcast_v16f32_to_v8i64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -15671,12 +15781,15 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB29_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_3
-; SI-NEXT: .LBB29_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB29_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB29_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
; SI-NEXT: v_add_f32_e32 v14, 1.0, v14
; SI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -15693,16 +15806,15 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB29_3: ; %end
+; SI-NEXT: .LBB29_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB29_4:
-; SI-NEXT: s_branch .LBB29_2
;
; VI-LABEL: bitcast_v16f32_to_v8i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -15718,12 +15830,15 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB29_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_3
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -15740,16 +15855,15 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB29_3: ; %end
+; VI-NEXT: .LBB29_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_4:
-; VI-NEXT: s_branch .LBB29_2
;
; GFX9-LABEL: bitcast_v16f32_to_v8i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -15765,12 +15879,15 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_3
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -15787,10 +15904,8 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB29_3: ; %end
+; GFX9-NEXT: .LBB29_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_4:
-; GFX9-NEXT: s_branch .LBB29_2
;
; GFX11-LABEL: bitcast_v16f32_to_v8i64_scalar:
; GFX11: ; %bb.0:
@@ -15800,12 +15915,15 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
@@ -15823,8 +15941,6 @@ define inreg <8 x i64> @bitcast_v16f32_to_v8i64_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -15994,8 +16110,9 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; SI-LABEL: bitcast_v8i64_to_v16f32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -16011,12 +16128,15 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB31_3
-; SI-NEXT: .LBB31_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB31_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB31_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
; SI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; SI-NEXT: v_add_i32_e32 v12, vcc, 3, v12
@@ -16033,16 +16153,15 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB31_3: ; %end
+; SI-NEXT: .LBB31_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v8i64_to_v16f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -16058,12 +16177,15 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -16080,16 +16202,15 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v8i64_to_v16f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -16105,12 +16226,15 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_3
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -16127,21 +16251,22 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB31_3: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v8i64_to_v16f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s26, s26, 3
; GFX11-NEXT: s_addc_u32 s27, s27, 0
; GFX11-NEXT: s_add_u32 s24, s24, 3
@@ -16158,7 +16283,7 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB31_3: ; %end
+; GFX11-NEXT: .LBB31_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -16169,8 +16294,6 @@ define inreg <16 x float> @bitcast_v8i64_to_v16f32_scalar(<8 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16318,8 +16441,9 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; SI-LABEL: bitcast_v16f32_to_v8f64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -16335,12 +16459,15 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB33_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_3
-; SI-NEXT: .LBB33_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB33_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB33_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
; SI-NEXT: v_add_f32_e32 v14, 1.0, v14
; SI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -16357,16 +16484,15 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB33_3: ; %end
+; SI-NEXT: .LBB33_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB33_4:
-; SI-NEXT: s_branch .LBB33_2
;
; VI-LABEL: bitcast_v16f32_to_v8f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -16382,12 +16508,15 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB33_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_3
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -16404,16 +16533,15 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB33_3: ; %end
+; VI-NEXT: .LBB33_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_4:
-; VI-NEXT: s_branch .LBB33_2
;
; GFX9-LABEL: bitcast_v16f32_to_v8f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -16429,12 +16557,15 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_3
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -16451,10 +16582,8 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB33_3: ; %end
+; GFX9-NEXT: .LBB33_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_4:
-; GFX9-NEXT: s_branch .LBB33_2
;
; GFX11-LABEL: bitcast_v16f32_to_v8f64_scalar:
; GFX11: ; %bb.0:
@@ -16464,12 +16593,15 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
@@ -16487,8 +16619,6 @@ define inreg <8 x double> @bitcast_v16f32_to_v8f64_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -16622,8 +16752,9 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; SI-LABEL: bitcast_v8f64_to_v16f32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -16639,12 +16770,15 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB35_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_3
-; SI-NEXT: .LBB35_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB35_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB35_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; SI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -16653,16 +16787,15 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB35_3: ; %end
+; SI-NEXT: .LBB35_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_4:
-; SI-NEXT: s_branch .LBB35_2
;
; VI-LABEL: bitcast_v8f64_to_v16f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -16678,12 +16811,15 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB35_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_3
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -16692,16 +16828,15 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB35_3: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_4:
-; VI-NEXT: s_branch .LBB35_2
;
; GFX9-LABEL: bitcast_v8f64_to_v16f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -16717,12 +16852,15 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_3
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -16731,10 +16869,8 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB35_3: ; %end
+; GFX9-NEXT: .LBB35_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_4:
-; GFX9-NEXT: s_branch .LBB35_2
;
; GFX11-LABEL: bitcast_v8f64_to_v16f32_scalar:
; GFX11: ; %bb.0:
@@ -16744,12 +16880,15 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -16759,8 +16898,6 @@ define inreg <16 x float> @bitcast_v8f64_to_v16f32_scalar(<8 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -16987,6 +17124,7 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v30, v1
; SI-NEXT: v_mov_b32_e32 v28, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -17002,8 +17140,8 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, s26
; SI-NEXT: v_mov_b32_e32 v22, s27
; SI-NEXT: v_mov_b32_e32 v24, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB37_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16
@@ -17075,13 +17213,16 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB37_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB37_2
+; SI-NEXT: s_branch .LBB37_3
;
; VI-LABEL: bitcast_v16f32_to_v32i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -17097,12 +17238,15 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -17119,16 +17263,15 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v16f32_to_v32i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -17144,12 +17287,15 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -17166,10 +17312,8 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v16f32_to_v32i16_scalar:
; GFX11: ; %bb.0:
@@ -17179,12 +17323,15 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
@@ -17202,8 +17349,6 @@ define inreg <32 x i16> @bitcast_v16f32_to_v32i16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -17556,6 +17701,7 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, v14
; SI-NEXT: v_mov_b32_e32 v25, v12
; SI-NEXT: v_mov_b32_e32 v19, v10
@@ -17564,7 +17710,7 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v22, v4
; SI-NEXT: v_mov_b32_e32 v23, v2
; SI-NEXT: v_mov_b32_e32 v24, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v5
@@ -17706,19 +17852,25 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB39_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB39_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB39_2
+; SI-NEXT: s_branch .LBB39_3
;
; VI-LABEL: bitcast_v32i16_to_v16f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: v_readfirstlane_b32 s7, v1
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s7, 3
; VI-NEXT: s_and_b32 s4, s7, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -17799,7 +17951,7 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -17817,14 +17969,13 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v14, s6
; VI-NEXT: v_mov_b32_e32 v15, s7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v32i16_to_v16f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -17840,12 +17991,15 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -17862,10 +18016,8 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v32i16_to_v16f32_scalar:
; GFX11: ; %bb.0:
@@ -17875,12 +18027,15 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -17898,8 +18053,6 @@ define inreg <16 x float> @bitcast_v32i16_to_v16f32_scalar(<32 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -18225,14 +18378,15 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s7, v1
+; SI-NEXT: v_readfirstlane_b32 s7, v0
+; SI-NEXT: v_readfirstlane_b32 s6, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v31, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v31, s4
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v29, s4
; SI-NEXT: s_lshr_b32 s4, s29, 16
; SI-NEXT: v_cvt_f32_f16_e32 v27, s4
@@ -18262,8 +18416,8 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v30, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v28, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v30, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v28, s7
; SI-NEXT: v_cvt_f32_f16_e32 v26, s29
; SI-NEXT: v_cvt_f32_f16_e32 v24, s28
; SI-NEXT: v_cvt_f32_f16_e32 v22, s27
@@ -18294,8 +18448,8 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v22, s27, 1.0
; SI-NEXT: v_add_f32_e64 v24, s28, 1.0
; SI-NEXT: v_add_f32_e64 v26, s29, 1.0
-; SI-NEXT: v_add_f32_e64 v28, s6, 1.0
-; SI-NEXT: v_add_f32_e64 v30, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v28, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v30, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v0
; SI-NEXT: v_lshrrev_b32_e32 v3, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v5, 16, v4
@@ -18379,13 +18533,16 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v16f32_to_v32f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -18401,12 +18558,15 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB41_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_3
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -18423,16 +18583,15 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB41_3: ; %end
+; VI-NEXT: .LBB41_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_4:
-; VI-NEXT: s_branch .LBB41_2
;
; GFX9-LABEL: bitcast_v16f32_to_v32f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -18448,12 +18607,15 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_3
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -18470,10 +18632,8 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB41_3: ; %end
+; GFX9-NEXT: .LBB41_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_4:
-; GFX9-NEXT: s_branch .LBB41_2
;
; GFX11-LABEL: bitcast_v16f32_to_v32f16_scalar:
; GFX11: ; %bb.0:
@@ -18483,12 +18643,15 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
@@ -18506,8 +18669,6 @@ define inreg <32 x half> @bitcast_v16f32_to_v32f16_scalar(<16 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -18980,6 +19141,7 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v35, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -19150,13 +19312,16 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v32f16_to_v16f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -19172,12 +19337,15 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB43_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_3
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x200
; VI-NEXT: v_add_f16_sdwa v17, v15, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -19227,16 +19395,15 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, 0x200, v0
; VI-NEXT: v_or_b32_e32 v1, v1, v17
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: .LBB43_3: ; %end
+; VI-NEXT: .LBB43_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_4:
-; VI-NEXT: s_branch .LBB43_2
;
; GFX9-LABEL: bitcast_v32f16_to_v16f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -19252,12 +19419,15 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_3
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -19275,10 +19445,8 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v2, v2, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, v1, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, v0, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB43_3: ; %end
+; GFX9-NEXT: .LBB43_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_4:
-; GFX9-NEXT: s_branch .LBB43_2
;
; GFX11-LABEL: bitcast_v32f16_to_v16f32_scalar:
; GFX11: ; %bb.0:
@@ -19288,12 +19456,15 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -19311,8 +19482,6 @@ define inreg <16 x float> @bitcast_v32f16_to_v16f32_scalar(<32 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -19606,6 +19775,7 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v34, s16
; SI-NEXT: v_mov_b32_e32 v35, s17
; SI-NEXT: v_mov_b32_e32 v36, s18
@@ -19619,8 +19789,8 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; SI-NEXT: v_mov_b32_e32 v52, s26
; SI-NEXT: v_mov_b32_e32 v53, s27
; SI-NEXT: v_mov_b32_e32 v54, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v55, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v1
@@ -19742,13 +19912,16 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v16f32_to_v32bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -19764,12 +19937,15 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB45_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_3
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -19786,16 +19962,15 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB45_3: ; %end
+; VI-NEXT: .LBB45_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_4:
-; VI-NEXT: s_branch .LBB45_2
;
; GFX9-LABEL: bitcast_v16f32_to_v32bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -19811,12 +19986,15 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_3
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -19833,10 +20011,8 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB45_3: ; %end
+; GFX9-NEXT: .LBB45_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_4:
-; GFX9-NEXT: s_branch .LBB45_2
;
; GFX11-LABEL: bitcast_v16f32_to_v32bf16_scalar:
; GFX11: ; %bb.0:
@@ -19846,12 +20022,15 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v15, s27, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s26, 1.0
; GFX11-NEXT: v_add_f32_e64 v13, s25, 1.0
@@ -19869,8 +20048,6 @@ define inreg <32 x bfloat> @bitcast_v16f32_to_v32bf16_scalar(<16 x float> inreg
; GFX11-NEXT: v_add_f32_e64 v1, s13, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s12, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -21308,6 +21485,7 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; SI-NEXT: v_mul_f32_e32 v19, 1.0, v14
; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17
; SI-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mul_f32_e64 v54, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v55, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v52, 1.0, s21
@@ -21456,7 +21634,9 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v32bf16_to_v16f32_scalar:
; VI: ; %bb.0:
@@ -21464,16 +21644,20 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v19, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; VI-NEXT: v_writelane_b32 v19, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v19, s31, 1
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -21764,8 +21948,6 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v16, 16
; VI-NEXT: s_branch .LBB47_5
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -21798,16 +21980,20 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v20, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_writelane_b32 v20, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v20, s31, 1
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -22115,8 +22301,6 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
; GFX9-NEXT: s_branch .LBB47_5
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -22151,12 +22335,15 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s0, s27, 16
; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
@@ -22485,8 +22672,6 @@ define inreg <16 x float> @bitcast_v32bf16_to_v16f32_scalar(<32 x bfloat> inreg
; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -24200,6 +24385,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v28, s16
; SI-NEXT: v_mov_b32_e32 v25, s17
; SI-NEXT: v_mov_b32_e32 v20, s18
@@ -24207,14 +24393,14 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v15, s20
; SI-NEXT: v_mov_b32_e32 v14, s21
; SI-NEXT: v_mov_b32_e32 v11, s22
-; SI-NEXT: v_mov_b32_e32 v9, s23
-; SI-NEXT: v_mov_b32_e32 v8, s24
-; SI-NEXT: v_mov_b32_e32 v7, s25
-; SI-NEXT: v_mov_b32_e32 v6, s26
-; SI-NEXT: v_mov_b32_e32 v5, s27
-; SI-NEXT: v_mov_b32_e32 v4, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_mov_b32_e32 v3, s29
+; SI-NEXT: v_mov_b32_e32 v10, s23
+; SI-NEXT: v_mov_b32_e32 v9, s24
+; SI-NEXT: v_mov_b32_e32 v8, s25
+; SI-NEXT: v_mov_b32_e32 v7, s26
+; SI-NEXT: v_mov_b32_e32 v6, s27
+; SI-NEXT: v_mov_b32_e32 v5, s28
+; SI-NEXT: v_mov_b32_e32 v4, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -24233,22 +24419,22 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: v_alignbit_b32 v10, v2, v1, 24
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v3, v2, v1, 24
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16
; SI-NEXT: v_alignbit_b32 v13, v2, v1, 8
-; SI-NEXT: v_alignbit_b32 v16, v3, v4, 24
-; SI-NEXT: v_alignbit_b32 v17, v3, v4, 16
-; SI-NEXT: v_alignbit_b32 v18, v3, v4, 8
-; SI-NEXT: v_alignbit_b32 v21, v5, v6, 24
-; SI-NEXT: v_alignbit_b32 v22, v5, v6, 16
-; SI-NEXT: v_alignbit_b32 v23, v5, v6, 8
-; SI-NEXT: v_alignbit_b32 v29, v7, v8, 24
-; SI-NEXT: v_alignbit_b32 v30, v7, v8, 16
-; SI-NEXT: v_alignbit_b32 v31, v7, v8, 8
-; SI-NEXT: v_alignbit_b32 v35, v9, v11, 24
-; SI-NEXT: v_alignbit_b32 v36, v9, v11, 16
-; SI-NEXT: v_alignbit_b32 v37, v9, v11, 8
+; SI-NEXT: v_alignbit_b32 v16, v4, v5, 24
+; SI-NEXT: v_alignbit_b32 v17, v4, v5, 16
+; SI-NEXT: v_alignbit_b32 v18, v4, v5, 8
+; SI-NEXT: v_alignbit_b32 v21, v6, v7, 24
+; SI-NEXT: v_alignbit_b32 v22, v6, v7, 16
+; SI-NEXT: v_alignbit_b32 v23, v6, v7, 8
+; SI-NEXT: v_alignbit_b32 v29, v8, v9, 24
+; SI-NEXT: v_alignbit_b32 v30, v8, v9, 16
+; SI-NEXT: v_alignbit_b32 v31, v8, v9, 8
+; SI-NEXT: v_alignbit_b32 v35, v10, v11, 24
+; SI-NEXT: v_alignbit_b32 v36, v10, v11, 16
+; SI-NEXT: v_alignbit_b32 v37, v10, v11, 8
; SI-NEXT: v_alignbit_b32 v49, v14, v15, 24
; SI-NEXT: v_alignbit_b32 v50, v14, v15, 16
; SI-NEXT: v_alignbit_b32 v52, v14, v15, 8
@@ -24262,18 +24448,18 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v2
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v2
-; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v3
-; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v3
-; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v3
-; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v5
-; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v5
-; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v7
-; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v7
-; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v9
-; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v9
-; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v9
+; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v4
+; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4
+; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v4
+; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v6
+; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v6
+; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v6
+; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v8
+; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v8
+; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v10
+; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v10
+; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v10
; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v14
; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14
; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v14
@@ -24288,7 +24474,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v25
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25
+; SI-NEXT: v_lshrrev_b32_e32 v3, 8, v25
; SI-NEXT: s_cbranch_execnz .LBB49_3
; SI-NEXT: .LBB49_2: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
@@ -24299,30 +24485,30 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e32 v20, 1.0, v20
; SI-NEXT: v_add_f32_e32 v14, 1.0, v14
; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
-; SI-NEXT: v_add_f32_e32 v9, 1.0, v9
+; SI-NEXT: v_add_f32_e32 v10, 1.0, v10
; SI-NEXT: v_add_f32_e32 v11, 1.0, v11
-; SI-NEXT: v_add_f32_e32 v7, 1.0, v7
; SI-NEXT: v_add_f32_e32 v8, 1.0, v8
-; SI-NEXT: v_add_f32_e32 v5, 1.0, v5
+; SI-NEXT: v_add_f32_e32 v9, 1.0, v9
; SI-NEXT: v_add_f32_e32 v6, 1.0, v6
-; SI-NEXT: v_add_f32_e32 v3, 1.0, v3
+; SI-NEXT: v_add_f32_e32 v7, 1.0, v7
; SI-NEXT: v_add_f32_e32 v4, 1.0, v4
-; SI-NEXT: v_alignbit_b32 v10, v2, v1, 24
-; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
+; SI-NEXT: v_add_f32_e32 v5, 1.0, v5
+; SI-NEXT: v_alignbit_b32 v3, v2, v1, 24
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: v_alignbit_b32 v12, v2, v1, 16
; SI-NEXT: v_alignbit_b32 v13, v2, v1, 8
-; SI-NEXT: v_alignbit_b32 v16, v3, v4, 24
-; SI-NEXT: v_alignbit_b32 v17, v3, v4, 16
-; SI-NEXT: v_alignbit_b32 v18, v3, v4, 8
-; SI-NEXT: v_alignbit_b32 v21, v5, v6, 24
-; SI-NEXT: v_alignbit_b32 v22, v5, v6, 16
-; SI-NEXT: v_alignbit_b32 v23, v5, v6, 8
-; SI-NEXT: v_alignbit_b32 v29, v7, v8, 24
-; SI-NEXT: v_alignbit_b32 v30, v7, v8, 16
-; SI-NEXT: v_alignbit_b32 v31, v7, v8, 8
-; SI-NEXT: v_alignbit_b32 v35, v9, v11, 24
-; SI-NEXT: v_alignbit_b32 v36, v9, v11, 16
-; SI-NEXT: v_alignbit_b32 v37, v9, v11, 8
+; SI-NEXT: v_alignbit_b32 v16, v4, v5, 24
+; SI-NEXT: v_alignbit_b32 v17, v4, v5, 16
+; SI-NEXT: v_alignbit_b32 v18, v4, v5, 8
+; SI-NEXT: v_alignbit_b32 v21, v6, v7, 24
+; SI-NEXT: v_alignbit_b32 v22, v6, v7, 16
+; SI-NEXT: v_alignbit_b32 v23, v6, v7, 8
+; SI-NEXT: v_alignbit_b32 v29, v8, v9, 24
+; SI-NEXT: v_alignbit_b32 v30, v8, v9, 16
+; SI-NEXT: v_alignbit_b32 v31, v8, v9, 8
+; SI-NEXT: v_alignbit_b32 v35, v10, v11, 24
+; SI-NEXT: v_alignbit_b32 v36, v10, v11, 16
+; SI-NEXT: v_alignbit_b32 v37, v10, v11, 8
; SI-NEXT: v_alignbit_b32 v49, v14, v15, 24
; SI-NEXT: v_alignbit_b32 v50, v14, v15, 16
; SI-NEXT: v_alignbit_b32 v52, v14, v15, 8
@@ -24336,18 +24522,18 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_lshrrev_b32_e32 v24, 24, v2
; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v27, 8, v2
-; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v3
-; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v3
-; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v3
-; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v5
-; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v5
-; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v5
-; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v7
-; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v7
-; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v7
-; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v9
-; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v9
-; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v9
+; SI-NEXT: v_lshrrev_b32_e32 v32, 24, v4
+; SI-NEXT: v_lshrrev_b32_e32 v33, 16, v4
+; SI-NEXT: v_lshrrev_b32_e32 v34, 8, v4
+; SI-NEXT: v_lshrrev_b32_e32 v38, 24, v6
+; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v6
+; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v6
+; SI-NEXT: v_lshrrev_b32_e32 v51, 24, v8
+; SI-NEXT: v_lshrrev_b32_e32 v53, 16, v8
+; SI-NEXT: v_lshrrev_b32_e32 v54, 8, v8
+; SI-NEXT: v_lshrrev_b32_e32 v40, 24, v10
+; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v10
+; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v10
; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v14
; SI-NEXT: v_lshrrev_b32_e32 v47, 16, v14
; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v14
@@ -24362,7 +24548,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: v_lshrrev_b32_e32 v63, 16, v25
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v10, 8, v25
+; SI-NEXT: v_lshrrev_b32_e32 v3, 8, v25
; SI-NEXT: .LBB49_3: ; %end
; SI-NEXT: v_and_b32_e32 v28, 0xff, v28
; SI-NEXT: s_waitcnt expcnt(5)
@@ -24374,153 +24560,153 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v46, v46, v56
; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28
; SI-NEXT: v_and_b32_e32 v25, 0xff, v25
-; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v10
+; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v3
; SI-NEXT: v_or_b32_e32 v28, v28, v46
-; SI-NEXT: v_or_b32_e32 v10, v25, v10
+; SI-NEXT: v_or_b32_e32 v3, v25, v3
; SI-NEXT: v_and_b32_e32 v25, 0xff, v63
; SI-NEXT: buffer_store_dword v28, v0, s[0:3], 0 offen
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v28, 24, v62
-; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v25, v28, v25
-; SI-NEXT: v_or_b32_e32 v10, v10, v25
+; SI-NEXT: v_or_b32_e32 v3, v3, v25
; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0
-; SI-NEXT: buffer_store_dword v10, v25, s[0:3], 0 offen
+; SI-NEXT: buffer_store_dword v3, v25, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v20
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v20
; SI-NEXT: v_lshlrev_b32_e32 v20, 8, v43
-; SI-NEXT: v_or_b32_e32 v10, v10, v20
+; SI-NEXT: v_or_b32_e32 v3, v3, v20
; SI-NEXT: v_and_b32_e32 v20, 0xff, v41
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v55
-; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v20, v25, v20
-; SI-NEXT: v_or_b32_e32 v10, v10, v20
+; SI-NEXT: v_or_b32_e32 v3, v3, v20
; SI-NEXT: v_add_i32_e32 v20, vcc, 8, v0
-; SI-NEXT: buffer_store_dword v10, v20, s[0:3], 0 offen
+; SI-NEXT: buffer_store_dword v3, v20, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v19
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v19
; SI-NEXT: v_lshlrev_b32_e32 v19, 8, v61
-; SI-NEXT: v_or_b32_e32 v10, v10, v19
+; SI-NEXT: v_or_b32_e32 v3, v3, v19
; SI-NEXT: v_and_b32_e32 v19, 0xff, v60
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
; SI-NEXT: v_lshlrev_b32_e32 v20, 24, v59
-; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v19, v20, v19
-; SI-NEXT: v_or_b32_e32 v10, v10, v19
+; SI-NEXT: v_or_b32_e32 v3, v3, v19
; SI-NEXT: v_add_i32_e32 v19, vcc, 12, v0
-; SI-NEXT: buffer_store_dword v10, v19, s[0:3], 0 offen
+; SI-NEXT: buffer_store_dword v3, v19, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v15
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v52
-; SI-NEXT: v_or_b32_e32 v10, v10, v15
+; SI-NEXT: v_or_b32_e32 v3, v3, v15
; SI-NEXT: v_and_b32_e32 v15, 0xff, v50
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v49
-; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v15, v19, v15
-; SI-NEXT: v_or_b32_e32 v10, v10, v15
+; SI-NEXT: v_or_b32_e32 v3, v3, v15
; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0
-; SI-NEXT: buffer_store_dword v10, v15, s[0:3], 0 offen
+; SI-NEXT: buffer_store_dword v3, v15, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v14
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 8, v57
-; SI-NEXT: v_or_b32_e32 v10, v10, v14
+; SI-NEXT: v_or_b32_e32 v3, v3, v14
; SI-NEXT: v_and_b32_e32 v14, 0xff, v47
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v45
-; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: v_or_b32_e32 v10, v10, v14
+; SI-NEXT: v_or_b32_e32 v3, v3, v14
; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0
-; SI-NEXT: buffer_store_dword v10, v14, s[0:3], 0 offen
+; SI-NEXT: buffer_store_dword v3, v14, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v10, 0xff, v11
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v37
-; SI-NEXT: v_or_b32_e32 v10, v10, v11
+; SI-NEXT: v_or_b32_e32 v3, v3, v11
; SI-NEXT: v_and_b32_e32 v11, 0xff, v36
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v14, 24, v35
-; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v11, v14, v11
-; SI-NEXT: v_or_b32_e32 v10, v10, v11
+; SI-NEXT: v_or_b32_e32 v3, v3, v11
; SI-NEXT: v_add_i32_e32 v11, vcc, 24, v0
-; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
+; SI-NEXT: buffer_store_dword v3, v11, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 8, v44
-; SI-NEXT: v_or_b32_e32 v9, v9, v10
+; SI-NEXT: v_or_b32_e32 v3, v3, v10
; SI-NEXT: v_and_b32_e32 v10, 0xff, v42
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v40
-; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_or_b32_e32 v9, v9, v10
+; SI-NEXT: v_or_b32_e32 v3, v3, v10
; SI-NEXT: v_add_i32_e32 v10, vcc, 28, v0
-; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v8, 0xff, v8
+; SI-NEXT: buffer_store_dword v3, v10, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v31
-; SI-NEXT: v_or_b32_e32 v8, v8, v9
+; SI-NEXT: v_or_b32_e32 v3, v3, v9
; SI-NEXT: v_and_b32_e32 v9, 0xff, v30
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v29
-; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_or_b32_e32 v8, v8, v9
+; SI-NEXT: v_or_b32_e32 v3, v3, v9
; SI-NEXT: v_add_i32_e32 v9, vcc, 32, v0
-; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
+; SI-NEXT: buffer_store_dword v3, v9, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 8, v54
-; SI-NEXT: v_or_b32_e32 v7, v7, v8
+; SI-NEXT: v_or_b32_e32 v3, v3, v8
; SI-NEXT: v_and_b32_e32 v8, 0xff, v53
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v51
-; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_or_b32_e32 v7, v7, v8
+; SI-NEXT: v_or_b32_e32 v3, v3, v8
; SI-NEXT: v_add_i32_e32 v8, vcc, 36, v0
-; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v6, 0xff, v6
+; SI-NEXT: buffer_store_dword v3, v8, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 8, v23
-; SI-NEXT: v_or_b32_e32 v6, v6, v7
+; SI-NEXT: v_or_b32_e32 v3, v3, v7
; SI-NEXT: v_and_b32_e32 v7, 0xff, v22
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_lshlrev_b32_e32 v8, 24, v21
-; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_or_b32_e32 v6, v6, v7
+; SI-NEXT: v_or_b32_e32 v3, v3, v7
; SI-NEXT: v_add_i32_e32 v7, vcc, 40, v0
-; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
+; SI-NEXT: buffer_store_dword v3, v7, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v48
-; SI-NEXT: v_or_b32_e32 v5, v5, v6
+; SI-NEXT: v_or_b32_e32 v3, v3, v6
; SI-NEXT: v_and_b32_e32 v6, 0xff, v39
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v38
-; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_or_b32_e32 v5, v5, v6
+; SI-NEXT: v_or_b32_e32 v3, v3, v6
; SI-NEXT: v_add_i32_e32 v6, vcc, 44, v0
-; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v4, 0xff, v4
+; SI-NEXT: buffer_store_dword v3, v6, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v18
-; SI-NEXT: v_or_b32_e32 v4, v4, v5
+; SI-NEXT: v_or_b32_e32 v3, v3, v5
; SI-NEXT: v_and_b32_e32 v5, 0xff, v17
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 24, v16
-; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: v_or_b32_e32 v4, v4, v5
+; SI-NEXT: v_or_b32_e32 v3, v3, v5
; SI-NEXT: v_add_i32_e32 v5, vcc, 48, v0
-; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v3
+; SI-NEXT: buffer_store_dword v3, v5, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v4
; SI-NEXT: v_lshlrev_b32_e32 v4, 8, v34
; SI-NEXT: v_or_b32_e32 v3, v3, v4
; SI-NEXT: v_and_b32_e32 v4, 0xff, v33
@@ -24580,7 +24766,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr56
; SI-NEXT: ; implicit-def: $vgpr46
-; SI-NEXT: ; implicit-def: $vgpr10
+; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr63
; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr43
@@ -24625,7 +24811,9 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr24
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v16f32_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -24655,8 +24843,9 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_writelane_b32 v63, s67, 19
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: s_mov_b64 s[46:47], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -24842,7 +25031,8 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr59
; VI-NEXT: ; implicit-def: $sgpr57
; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v20, s44
; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -25095,8 +25285,9 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; GFX9-NEXT: v_writelane_b32 v63, s55, 15
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[46:47], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -25284,7 +25475,8 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr59
; GFX9-NEXT: ; implicit-def: $sgpr57
; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v21, s44
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -25504,7 +25696,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v40, s30, 0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s42, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: v_writelane_b32 v40, s31, 1
; GFX11-NEXT: v_writelane_b32 v40, s34, 2
; GFX11-NEXT: v_writelane_b32 v40, s35, 3
@@ -25513,49 +25705,49 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_writelane_b32 v40, s38, 6
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-NEXT: v_writelane_b32 v40, s48, 8
-; GFX11-NEXT: v_writelane_b32 v40, s49, 9
; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s43, s27, 24
-; GFX11-NEXT: s_lshr_b32 s44, s27, 16
-; GFX11-NEXT: s_lshr_b32 s46, s27, 8
-; GFX11-NEXT: s_lshr_b32 s45, s26, 16
-; GFX11-NEXT: s_lshr_b32 s47, s26, 8
-; GFX11-NEXT: s_lshr_b32 s56, s25, 24
-; GFX11-NEXT: s_lshr_b32 s57, s25, 16
-; GFX11-NEXT: s_lshr_b32 s59, s25, 8
-; GFX11-NEXT: s_lshr_b32 s58, s24, 16
-; GFX11-NEXT: s_lshr_b32 s60, s24, 8
-; GFX11-NEXT: s_lshr_b32 s61, s23, 24
-; GFX11-NEXT: s_lshr_b32 s62, s23, 16
-; GFX11-NEXT: s_lshr_b32 s72, s23, 8
-; GFX11-NEXT: s_lshr_b32 s63, s22, 16
-; GFX11-NEXT: s_lshr_b32 s73, s22, 8
-; GFX11-NEXT: s_lshr_b32 s74, s21, 24
-; GFX11-NEXT: s_lshr_b32 s75, s21, 16
-; GFX11-NEXT: s_lshr_b32 s77, s21, 8
-; GFX11-NEXT: s_lshr_b32 s76, s20, 16
-; GFX11-NEXT: s_lshr_b32 s78, s20, 8
-; GFX11-NEXT: s_lshr_b32 s79, s19, 24
-; GFX11-NEXT: s_lshr_b32 s88, s19, 16
-; GFX11-NEXT: s_lshr_b32 s90, s19, 8
-; GFX11-NEXT: s_lshr_b32 s89, s18, 16
-; GFX11-NEXT: s_lshr_b32 s91, s18, 8
-; GFX11-NEXT: s_lshr_b32 s92, s17, 24
-; GFX11-NEXT: s_lshr_b32 s93, s17, 16
-; GFX11-NEXT: s_lshr_b32 s95, s17, 8
-; GFX11-NEXT: s_lshr_b32 s94, s16, 16
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s16, 8
-; GFX11-NEXT: s_lshr_b32 s30, s3, 24
-; GFX11-NEXT: s_lshr_b32 s31, s3, 16
-; GFX11-NEXT: s_lshr_b32 s35, s3, 8
-; GFX11-NEXT: s_lshr_b32 s34, s2, 16
-; GFX11-NEXT: s_lshr_b32 s36, s2, 8
-; GFX11-NEXT: s_lshr_b32 s37, s1, 24
-; GFX11-NEXT: s_lshr_b32 s38, s1, 16
-; GFX11-NEXT: s_lshr_b32 s48, s1, 8
-; GFX11-NEXT: s_lshr_b32 s39, s0, 16
-; GFX11-NEXT: s_lshr_b32 s49, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s42, s27, 24
+; GFX11-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-NEXT: s_lshr_b32 s45, s27, 8
+; GFX11-NEXT: s_lshr_b32 s44, s26, 16
+; GFX11-NEXT: s_lshr_b32 s46, s26, 8
+; GFX11-NEXT: s_lshr_b32 s47, s25, 24
+; GFX11-NEXT: s_lshr_b32 s56, s25, 16
+; GFX11-NEXT: s_lshr_b32 s58, s25, 8
+; GFX11-NEXT: s_lshr_b32 s57, s24, 16
+; GFX11-NEXT: s_lshr_b32 s59, s24, 8
+; GFX11-NEXT: s_lshr_b32 s60, s23, 24
+; GFX11-NEXT: s_lshr_b32 s61, s23, 16
+; GFX11-NEXT: s_lshr_b32 s63, s23, 8
+; GFX11-NEXT: s_lshr_b32 s62, s22, 16
+; GFX11-NEXT: s_lshr_b32 s72, s22, 8
+; GFX11-NEXT: s_lshr_b32 s73, s21, 24
+; GFX11-NEXT: s_lshr_b32 s74, s21, 16
+; GFX11-NEXT: s_lshr_b32 s76, s21, 8
+; GFX11-NEXT: s_lshr_b32 s75, s20, 16
+; GFX11-NEXT: s_lshr_b32 s77, s20, 8
+; GFX11-NEXT: s_lshr_b32 s78, s19, 24
+; GFX11-NEXT: s_lshr_b32 s79, s19, 16
+; GFX11-NEXT: s_lshr_b32 s89, s19, 8
+; GFX11-NEXT: s_lshr_b32 s88, s18, 16
+; GFX11-NEXT: s_lshr_b32 s90, s18, 8
+; GFX11-NEXT: s_lshr_b32 s91, s17, 24
+; GFX11-NEXT: s_lshr_b32 s92, s17, 16
+; GFX11-NEXT: s_lshr_b32 s94, s17, 8
+; GFX11-NEXT: s_lshr_b32 s93, s16, 16
+; GFX11-NEXT: s_lshr_b32 s95, s16, 8
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s3, 24
+; GFX11-NEXT: s_lshr_b32 s30, s3, 16
+; GFX11-NEXT: s_lshr_b32 s34, s3, 8
+; GFX11-NEXT: s_lshr_b32 s31, s2, 16
+; GFX11-NEXT: s_lshr_b32 s35, s2, 8
+; GFX11-NEXT: s_lshr_b32 s36, s1, 24
+; GFX11-NEXT: s_lshr_b32 s37, s1, 16
+; GFX11-NEXT: s_lshr_b32 s39, s1, 8
+; GFX11-NEXT: s_lshr_b32 s38, s0, 16
+; GFX11-NEXT: s_lshr_b32 s48, s0, 8
; GFX11-NEXT: s_lshr_b64 s[40:41], s[26:27], 24
; GFX11-NEXT: s_lshr_b64 s[28:29], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[14:15], s[22:23], 24
@@ -25563,9 +25755,7 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_cbranch_execnz .LBB49_4
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v14, s19, 1.0
; GFX11-NEXT: v_add_f32_e64 v13, s18, 1.0
@@ -25633,55 +25823,56 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v96, 8, v23
; GFX11-NEXT: s_branch .LBB49_5
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr48
; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr39
; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr35
; GFX11-NEXT: ; implicit-def: $sgpr31
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr34
; GFX11-NEXT: ; implicit-def: $sgpr30
; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr95
; GFX11-NEXT: ; implicit-def: $sgpr93
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr94
; GFX11-NEXT: ; implicit-def: $sgpr92
; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr90
; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr89
; GFX11-NEXT: ; implicit-def: $sgpr79
; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr77
; GFX11-NEXT: ; implicit-def: $sgpr75
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr76
; GFX11-NEXT: ; implicit-def: $sgpr74
; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr72
; GFX11-NEXT: ; implicit-def: $sgpr62
+; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: ; implicit-def: $sgpr63
; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr57
+; GFX11-NEXT: ; implicit-def: $sgpr28
+; GFX11-NEXT: ; implicit-def: $sgpr58
; GFX11-NEXT: ; implicit-def: $sgpr56
; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr44
+; GFX11-NEXT: ; implicit-def: $sgpr40
+; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v23, s0 :: v_dual_mov_b32 v24, s1
; GFX11-NEXT: v_dual_mov_b32 v19, s2 :: v_dual_mov_b32 v20, s3
@@ -25691,28 +25882,28 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v5, s22 :: v_dual_mov_b32 v6, s23
; GFX11-NEXT: v_dual_mov_b32 v3, s24 :: v_dual_mov_b32 v4, s25
; GFX11-NEXT: v_dual_mov_b32 v1, s26 :: v_dual_mov_b32 v2, s27
-; GFX11-NEXT: v_dual_mov_b32 v96, s49 :: v_dual_mov_b32 v87, s39
-; GFX11-NEXT: v_dual_mov_b32 v86, s48 :: v_dual_mov_b32 v85, s38
-; GFX11-NEXT: v_dual_mov_b32 v84, s37 :: v_dual_mov_b32 v83, s36
-; GFX11-NEXT: v_dual_mov_b32 v82, s34 :: v_dual_mov_b32 v81, s35
-; GFX11-NEXT: v_dual_mov_b32 v80, s31 :: v_dual_mov_b32 v71, s30
-; GFX11-NEXT: v_dual_mov_b32 v70, vcc_hi :: v_dual_mov_b32 v69, s94
-; GFX11-NEXT: v_dual_mov_b32 v68, s95 :: v_dual_mov_b32 v67, s93
-; GFX11-NEXT: v_dual_mov_b32 v66, s92 :: v_dual_mov_b32 v65, s91
-; GFX11-NEXT: v_dual_mov_b32 v64, s89 :: v_dual_mov_b32 v55, s90
-; GFX11-NEXT: v_dual_mov_b32 v54, s88 :: v_dual_mov_b32 v53, s79
-; GFX11-NEXT: v_dual_mov_b32 v52, s78 :: v_dual_mov_b32 v51, s76
-; GFX11-NEXT: v_dual_mov_b32 v50, s77 :: v_dual_mov_b32 v49, s75
-; GFX11-NEXT: v_dual_mov_b32 v48, s74 :: v_dual_mov_b32 v39, s73
-; GFX11-NEXT: v_dual_mov_b32 v38, s63 :: v_dual_mov_b32 v37, s72
-; GFX11-NEXT: v_dual_mov_b32 v36, s62 :: v_dual_mov_b32 v35, s61
-; GFX11-NEXT: v_dual_mov_b32 v34, s60 :: v_dual_mov_b32 v33, s58
-; GFX11-NEXT: v_dual_mov_b32 v32, s59 :: v_dual_mov_b32 v31, s57
-; GFX11-NEXT: v_dual_mov_b32 v30, s56 :: v_dual_mov_b32 v29, s47
-; GFX11-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v7, s40
-; GFX11-NEXT: v_dual_mov_b32 v18, s46 :: v_dual_mov_b32 v11, s28
-; GFX11-NEXT: v_dual_mov_b32 v12, s44 :: v_dual_mov_b32 v17, s14
-; GFX11-NEXT: v_dual_mov_b32 v8, s43 :: v_dual_mov_b32 v21, s12
+; GFX11-NEXT: v_dual_mov_b32 v96, s48 :: v_dual_mov_b32 v87, s38
+; GFX11-NEXT: v_dual_mov_b32 v86, s39 :: v_dual_mov_b32 v85, s37
+; GFX11-NEXT: v_dual_mov_b32 v84, s36 :: v_dual_mov_b32 v83, s35
+; GFX11-NEXT: v_dual_mov_b32 v82, s31 :: v_dual_mov_b32 v81, s34
+; GFX11-NEXT: v_dual_mov_b32 v80, s30 :: v_dual_mov_b32 v71, vcc_hi
+; GFX11-NEXT: v_dual_mov_b32 v70, s95 :: v_dual_mov_b32 v69, s93
+; GFX11-NEXT: v_dual_mov_b32 v68, s94 :: v_dual_mov_b32 v67, s92
+; GFX11-NEXT: v_dual_mov_b32 v66, s91 :: v_dual_mov_b32 v65, s90
+; GFX11-NEXT: v_dual_mov_b32 v64, s88 :: v_dual_mov_b32 v55, s89
+; GFX11-NEXT: v_dual_mov_b32 v54, s79 :: v_dual_mov_b32 v53, s78
+; GFX11-NEXT: v_dual_mov_b32 v52, s77 :: v_dual_mov_b32 v51, s75
+; GFX11-NEXT: v_dual_mov_b32 v50, s76 :: v_dual_mov_b32 v49, s74
+; GFX11-NEXT: v_dual_mov_b32 v48, s73 :: v_dual_mov_b32 v39, s72
+; GFX11-NEXT: v_dual_mov_b32 v38, s62 :: v_dual_mov_b32 v37, s63
+; GFX11-NEXT: v_dual_mov_b32 v36, s61 :: v_dual_mov_b32 v35, s60
+; GFX11-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s57
+; GFX11-NEXT: v_dual_mov_b32 v32, s58 :: v_dual_mov_b32 v31, s56
+; GFX11-NEXT: v_dual_mov_b32 v30, s47 :: v_dual_mov_b32 v29, s46
+; GFX11-NEXT: v_dual_mov_b32 v22, s44 :: v_dual_mov_b32 v7, s40
+; GFX11-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v11, s28
+; GFX11-NEXT: v_dual_mov_b32 v12, s43 :: v_dual_mov_b32 v17, s14
+; GFX11-NEXT: v_dual_mov_b32 v8, s42 :: v_dual_mov_b32 v21, s12
; GFX11-NEXT: v_dual_mov_b32 v25, s10 :: v_dual_mov_b32 v26, s8
; GFX11-NEXT: v_dual_mov_b32 v27, s6 :: v_dual_mov_b32 v28, s4
; GFX11-NEXT: .LBB49_5: ; %end
@@ -25866,7 +26057,6 @@ define inreg <64 x i8> @bitcast_v16f32_to_v64i8_scalar(<16 x float> inreg %a, i3
; GFX11-NEXT: scratch_store_b128 v0, v[23:26], off offset:16
; GFX11-NEXT: scratch_store_b128 v0, v[13:16], off offset:32
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
-; GFX11-NEXT: v_readlane_b32 s49, v40, 9
; GFX11-NEXT: v_readlane_b32 s48, v40, 8
; GFX11-NEXT: v_readlane_b32 s39, v40, 7
; GFX11-NEXT: v_readlane_b32 s38, v40, 6
@@ -28414,13 +28604,13 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:36
@@ -28431,8 +28621,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
@@ -28449,23 +28640,22 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v25
; SI-NEXT: v_lshlrev_b32_e32 v45, 8, v27
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v29
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2
; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v50
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39
+; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v37
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43
; SI-NEXT: s_waitcnt vmcnt(4)
@@ -28473,7 +28663,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: s_cbranch_scc0 .LBB51_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -28528,7 +28718,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_or_b32_e32 v10, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_or_b32_e32 v0, v0, v23
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -28537,14 +28727,12 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
-; SI-NEXT: v_mov_b32_e32 v24, v26
-; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v22, v46
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -28559,8 +28747,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v13, v1
; SI-NEXT: v_mov_b32_e32 v62, v58
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v46, v45
+; SI-NEXT: v_mov_b32_e32 v47, v45
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
@@ -28573,19 +28760,22 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v1
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: v_or_b32_e32 v1, v42, v1
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
; SI-NEXT: v_or_b32_e32 v0, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_mov_b32_e32 v30, v48
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v24, v26
+; SI-NEXT: v_mov_b32_e32 v26, v28
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v1
; SI-NEXT: s_and_b32 s4, s28, 0xff
@@ -28646,7 +28836,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: v_or_b32_e32 v1, v50, v1
; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -28681,9 +28871,8 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -28727,7 +28916,8 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
; SI-NEXT: s_add_i32 s5, s5, 0x3000000
; SI-NEXT: s_add_i32 s6, s6, 0x3000000
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -28744,32 +28934,34 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22
-; SI-NEXT: v_or_b32_e32 v0, v47, v0
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v22, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26
-; SI-NEXT: v_or_b32_e32 v0, v46, v0
+; SI-NEXT: v_or_b32_e32 v0, v47, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: v_or_b32_e32 v1, v46, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -28778,9 +28970,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_or_b32_e32 v1, v25, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38
; SI-NEXT: v_or_b32_e32 v0, v23, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -28803,18 +28995,17 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54
-; SI-NEXT: v_or_b32_e32 v0, v48, v0
+; SI-NEXT: v_or_b32_e32 v0, v28, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v48, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -28825,7 +29016,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: .LBB51_3: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
@@ -28846,23 +29036,22 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_mov_b32_e32 v43, v6
; SI-NEXT: v_mov_b32_e32 v29, v8
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_mov_b32_e32 v36, v12
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
; SI-NEXT: v_mov_b32_e32 v24, v26
; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v30, v48
; SI-NEXT: v_mov_b32_e32 v39, v40
; SI-NEXT: v_mov_b32_e32 v41, v3
; SI-NEXT: v_mov_b32_e32 v40, v5
@@ -28872,20 +29061,22 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v57, v7
; SI-NEXT: v_mov_b32_e32 v59, v56
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
+; SI-NEXT: v_mov_b32_e32 v22, v46
; SI-NEXT: v_mov_b32_e32 v56, v9
-; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v47, v45
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
; SI-NEXT: v_mov_b32_e32 v19, v17
; SI-NEXT: v_mov_b32_e32 v17, v13
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v64i8_to_v16f32_scalar:
; VI: ; %bb.0:
@@ -28907,16 +29098,16 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v37, v30
-; VI-NEXT: v_mov_b32_e32 v61, v28
+; VI-NEXT: v_mov_b32_e32 v32, v28
; VI-NEXT: v_mov_b32_e32 v31, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -28929,8 +29120,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -28946,12 +29138,12 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35
@@ -28967,49 +29159,48 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v35, v4
+; VI-NEXT: v_mov_b32_e32 v36, v4
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v44, v2
+; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v63, v59
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v48
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v34, v39
+; VI-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v45, v25
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v43, v12
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -29019,8 +29210,8 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v26, v32
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
; VI-NEXT: v_mov_b32_e32 v19, v17
@@ -29030,17 +29221,18 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: v_mov_b32_e32 v29, v33
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v29, v61
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -29085,11 +29277,11 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v35
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v36
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -29107,11 +29299,13 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
@@ -29152,17 +29346,15 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
; VI-NEXT: s_add_i32 s6, s6, 0x3000000
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -29185,15 +29377,15 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
-; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62
+; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -29206,7 +29398,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -29241,34 +29433,34 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
-; VI-NEXT: v_mov_b32_e32 v44, v2
-; VI-NEXT: v_mov_b32_e32 v34, v39
-; VI-NEXT: v_mov_b32_e32 v35, v4
-; VI-NEXT: v_mov_b32_e32 v29, v33
+; VI-NEXT: v_mov_b32_e32 v34, v48
+; VI-NEXT: v_mov_b32_e32 v35, v2
+; VI-NEXT: v_mov_b32_e32 v36, v4
+; VI-NEXT: v_mov_b32_e32 v29, v61
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_mov_b32_e32 v43, v12
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_mov_b32_e32 v16, v18
; VI-NEXT: v_mov_b32_e32 v18, v20
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
+; VI-NEXT: v_mov_b32_e32 v26, v32
; VI-NEXT: v_mov_b32_e32 v30, v37
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_mov_b32_e32 v45, v25
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
@@ -29276,10 +29468,12 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v17, v13
; VI-NEXT: v_mov_b32_e32 v37, v27
; VI-NEXT: v_mov_b32_e32 v27, v42
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v64i8_to_v16f32_scalar:
; GFX9: ; %bb.0:
@@ -29301,16 +29495,16 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v37, v30
-; GFX9-NEXT: v_mov_b32_e32 v61, v28
+; GFX9-NEXT: v_mov_b32_e32 v32, v28
; GFX9-NEXT: v_mov_b32_e32 v31, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -29323,8 +29517,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -29340,14 +29535,14 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(22)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_waitcnt vmcnt(21)
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; GFX9-NEXT: s_waitcnt vmcnt(20)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; GFX9-NEXT: s_waitcnt vmcnt(16)
@@ -29366,49 +29561,48 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v63, v59
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v45, v25
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v43, v12
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -29418,8 +29612,8 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
; GFX9-NEXT: v_mov_b32_e32 v19, v17
@@ -29429,17 +29623,18 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s4, s4, s5
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
@@ -29483,11 +29678,11 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: s_lshl_b32 s6, s29, 8
; GFX9-NEXT: s_or_b32 s5, s6, s5
; GFX9-NEXT: v_add_u32_e32 v0, 3, v31
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v44
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v36
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s5, 0x300
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
@@ -29506,12 +29701,14 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
; GFX9-NEXT: v_add_u32_e32 v1, 3, v43
; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v44
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_and_b32 s5, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s17, 8
@@ -29552,18 +29749,16 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 3, v16
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 3, v18
-; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -29585,14 +29780,14 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v62
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v33
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v60
; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
@@ -29607,7 +29802,7 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v53
; GFX9-NEXT: v_add_u32_e32 v1, 3, v52
-; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -29641,34 +29836,34 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_mov_b32_e32 v43, v12
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_mov_b32_e32 v16, v18
; GFX9-NEXT: v_mov_b32_e32 v18, v20
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
; GFX9-NEXT: v_mov_b32_e32 v30, v37
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_mov_b32_e32 v45, v25
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
@@ -29676,10 +29871,12 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v17, v13
; GFX9-NEXT: v_mov_b32_e32 v37, v27
; GFX9-NEXT: v_mov_b32_e32 v27, v42
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v16f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -29720,7 +29917,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
@@ -29739,67 +29935,68 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -29865,10 +30062,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -30064,7 +30260,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-TRUE16-NEXT: s_branch .LBB51_3
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v16f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -30105,7 +30303,6 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(13)
@@ -30124,67 +30321,68 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -30250,10 +30448,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -30449,7 +30646,9 @@ define inreg <16 x float> @bitcast_v64i8_to_v16f32_scalar(<64 x i8> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-FAKE16-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30609,8 +30808,9 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; SI-LABEL: bitcast_v8i64_to_v8f64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -30626,12 +30826,15 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB53_3
-; SI-NEXT: .LBB53_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB53_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB53_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -30648,16 +30851,15 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
; SI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
-; SI-NEXT: .LBB53_3: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v8i64_to_v8f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -30673,12 +30875,15 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -30695,16 +30900,15 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_addc_u32_e32 v13, vcc, 0, v13, vcc
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v8i64_to_v8f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -30720,12 +30924,15 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_3
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -30742,21 +30949,22 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_addc_co_u32_e32 v13, vcc, 0, v13, vcc
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
-; GFX9-NEXT: .LBB53_3: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v8i64_to_v8f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
@@ -30773,7 +30981,7 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s25, s25, 0
; GFX11-NEXT: s_add_u32 s26, s26, 3
; GFX11-NEXT: s_addc_u32 s27, s27, 0
-; GFX11-NEXT: .LBB53_3: ; %end
+; GFX11-NEXT: .LBB53_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -30783,8 +30991,6 @@ define inreg <8 x double> @bitcast_v8i64_to_v8f64_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30908,8 +31114,9 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; SI-LABEL: bitcast_v8f64_to_v8i64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v2
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v16, v2
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -30925,12 +31132,15 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB55_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_3
-; SI-NEXT: .LBB55_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB55_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB55_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -30939,16 +31149,15 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; SI-NEXT: .LBB55_3: ; %end
+; SI-NEXT: .LBB55_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB55_4:
-; SI-NEXT: s_branch .LBB55_2
;
; VI-LABEL: bitcast_v8f64_to_v8i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -30964,12 +31173,15 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB55_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_3
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -30978,16 +31190,15 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; VI-NEXT: .LBB55_3: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_4:
-; VI-NEXT: s_branch .LBB55_2
;
; GFX9-LABEL: bitcast_v8f64_to_v8i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -31003,12 +31214,15 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_3
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -31017,10 +31231,8 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
-; GFX9-NEXT: .LBB55_3: ; %end
+; GFX9-NEXT: .LBB55_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_4:
-; GFX9-NEXT: s_branch .LBB55_2
;
; GFX11-LABEL: bitcast_v8f64_to_v8i64_scalar:
; GFX11: ; %bb.0:
@@ -31030,12 +31242,15 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[16:17], 1.0
@@ -31045,8 +31260,6 @@ define inreg <8 x i64> @bitcast_v8f64_to_v8i64_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -31285,6 +31498,7 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v3, v2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v30, v1
; SI-NEXT: v_mov_b32_e32 v28, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -31300,8 +31514,8 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v20, s26
; SI-NEXT: v_mov_b32_e32 v22, s27
; SI-NEXT: v_mov_b32_e32 v24, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_alignbit_b32 v29, v30, v28, 16
@@ -31373,13 +31587,16 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v8i64_to_v32i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -31395,12 +31612,15 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -31417,16 +31637,15 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v8i64_to_v32i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -31442,12 +31661,15 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_3
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -31464,21 +31686,22 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB57_3: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v8i64_to_v32i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s26, s26, 3
; GFX11-NEXT: s_addc_u32 s27, s27, 0
; GFX11-NEXT: s_add_u32 s24, s24, 3
@@ -31495,7 +31718,7 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB57_3: ; %end
+; GFX11-NEXT: .LBB57_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -31506,8 +31729,6 @@ define inreg <32 x i16> @bitcast_v8i64_to_v32i16_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31850,6 +32071,7 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, v14
; SI-NEXT: v_mov_b32_e32 v25, v12
; SI-NEXT: v_mov_b32_e32 v19, v10
@@ -31858,7 +32080,7 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v22, v4
; SI-NEXT: v_mov_b32_e32 v23, v2
; SI-NEXT: v_mov_b32_e32 v24, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v5
@@ -32000,19 +32222,25 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB59_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB59_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB59_2
+; SI-NEXT: s_branch .LBB59_3
;
; VI-LABEL: bitcast_v32i16_to_v8i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: v_readfirstlane_b32 s7, v1
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_3
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s7, 3
; VI-NEXT: s_and_b32 s4, s7, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -32093,7 +32321,7 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB59_3: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -32111,14 +32339,13 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v14, s6
; VI-NEXT: v_mov_b32_e32 v15, s7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v32i16_to_v8i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -32134,12 +32361,15 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_3
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -32156,10 +32386,8 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB59_3: ; %end
+; GFX9-NEXT: .LBB59_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_4:
-; GFX9-NEXT: s_branch .LBB59_2
;
; GFX11-LABEL: bitcast_v32i16_to_v8i64_scalar:
; GFX11: ; %bb.0:
@@ -32169,12 +32397,15 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -32192,8 +32423,6 @@ define inreg <8 x i64> @bitcast_v32i16_to_v8i64_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -32531,9 +32760,10 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s6, v0
; SI-NEXT: v_readfirstlane_b32 s7, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB61_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s7, 16
@@ -32685,13 +32915,16 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB61_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB61_2
+; SI-NEXT: s_branch .LBB61_3
;
; VI-LABEL: bitcast_v8i64_to_v32f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -32707,12 +32940,15 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB61_3
-; VI-NEXT: .LBB61_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB61_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB61_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -32729,16 +32965,15 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB61_3: ; %end
+; VI-NEXT: .LBB61_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v8i64_to_v32f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -32754,12 +32989,15 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB61_3
-; GFX9-NEXT: .LBB61_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB61_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -32776,21 +33014,22 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB61_3: ; %end
+; GFX9-NEXT: .LBB61_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v8i64_to_v32f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB61_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
-; GFX11-NEXT: .LBB61_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s26, s26, 3
; GFX11-NEXT: s_addc_u32 s27, s27, 0
; GFX11-NEXT: s_add_u32 s24, s24, 3
@@ -32807,7 +33046,7 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB61_3: ; %end
+; GFX11-NEXT: .LBB61_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -32818,8 +33057,6 @@ define inreg <32 x half> @bitcast_v8i64_to_v32f16_scalar(<8 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -33282,6 +33519,7 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v35, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB63_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -33452,13 +33690,16 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB63_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB63_2
+; SI-NEXT: s_branch .LBB63_3
;
; VI-LABEL: bitcast_v32f16_to_v8i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -33474,12 +33715,15 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB63_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_3
-; VI-NEXT: .LBB63_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB63_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB63_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x200
; VI-NEXT: v_add_f16_sdwa v17, v15, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -33529,16 +33773,15 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, 0x200, v0
; VI-NEXT: v_or_b32_e32 v1, v1, v17
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: .LBB63_3: ; %end
+; VI-NEXT: .LBB63_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB63_4:
-; VI-NEXT: s_branch .LBB63_2
;
; GFX9-LABEL: bitcast_v32f16_to_v8i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -33554,12 +33797,15 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_3
-; GFX9-NEXT: .LBB63_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB63_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -33577,10 +33823,8 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: v_pk_add_f16 v2, v2, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, v1, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, v0, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB63_3: ; %end
+; GFX9-NEXT: .LBB63_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB63_4:
-; GFX9-NEXT: s_branch .LBB63_2
;
; GFX11-LABEL: bitcast_v32f16_to_v8i64_scalar:
; GFX11: ; %bb.0:
@@ -33590,12 +33834,15 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB63_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -33613,8 +33860,6 @@ define inreg <8 x i64> @bitcast_v32f16_to_v8i64_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -33920,9 +34165,10 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s78, v0
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s78, v0
; SI-NEXT: v_readfirstlane_b32 s79, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB65_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s79, 0xffff0000
@@ -34074,13 +34320,16 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB65_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB65_2
+; SI-NEXT: s_branch .LBB65_3
;
; VI-LABEL: bitcast_v8i64_to_v32bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -34096,12 +34345,15 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_3
-; VI-NEXT: .LBB65_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB65_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB65_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
@@ -34118,16 +34370,15 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB65_3: ; %end
+; VI-NEXT: .LBB65_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v8i64_to_v32bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -34143,12 +34394,15 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_3
-; GFX9-NEXT: .LBB65_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB65_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v12, vcc, 3, v12
@@ -34165,21 +34419,22 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB65_3: ; %end
+; GFX9-NEXT: .LBB65_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v8i64_to_v32bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB65_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
-; GFX11-NEXT: .LBB65_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s26, s26, 3
; GFX11-NEXT: s_addc_u32 s27, s27, 0
; GFX11-NEXT: s_add_u32 s24, s24, 3
@@ -34196,7 +34451,7 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB65_3: ; %end
+; GFX11-NEXT: .LBB65_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -34207,8 +34462,6 @@ define inreg <32 x bfloat> @bitcast_v8i64_to_v32bf16_scalar(<8 x i64> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35636,6 +35889,7 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e32 v19, 1.0, v14
; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17
; SI-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mul_f32_e64 v54, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v55, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v52, 1.0, s21
@@ -35784,7 +36038,9 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB67_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB67_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB67_2
+; SI-NEXT: s_branch .LBB67_3
;
; VI-LABEL: bitcast_v32bf16_to_v8i64_scalar:
; VI: ; %bb.0:
@@ -35792,16 +36048,20 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v19, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; VI-NEXT: v_writelane_b32 v19, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v19, s31, 1
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
-; VI-NEXT: .LBB67_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB67_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB67_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -36092,8 +36352,6 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v16, 16
; VI-NEXT: s_branch .LBB67_5
-; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -36126,16 +36384,20 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v20, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_writelane_b32 v20, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v20, s31, 1
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
-; GFX9-NEXT: .LBB67_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB67_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -36443,8 +36705,6 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
; GFX9-NEXT: s_branch .LBB67_5
-; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -36479,12 +36739,15 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB67_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s0, s27, 16
; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
@@ -36813,8 +37076,6 @@ define inreg <8 x i64> @bitcast_v32bf16_to_v8i64_scalar(<32 x bfloat> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
; GFX11-NEXT: .LBB67_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -38554,9 +38815,10 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: v_readfirstlane_b32 s6, v2
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB69_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v3, s7
@@ -38933,7 +39195,9 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB69_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB69_2
+; SI-NEXT: s_branch .LBB69_3
;
; VI-LABEL: bitcast_v8i64_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -38962,8 +39226,9 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_writelane_b32 v4, s66, 18
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: s_mov_b64 s[46:47], -1
; VI-NEXT: v_writelane_b32 v4, s67, 19
; VI-NEXT: s_cbranch_scc0 .LBB69_4
; VI-NEXT: ; %bb.1: ; %cmp.false
@@ -39347,7 +39612,9 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr58
; VI-NEXT: ; implicit-def: $sgpr57
; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB69_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; VI-NEXT: s_cbranch_vccz .LBB69_2
+; VI-NEXT: s_branch .LBB69_3
;
; GFX9-LABEL: bitcast_v8i64_to_v64i8_scalar:
; GFX9: ; %bb.0:
@@ -39372,8 +39639,9 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; GFX9-NEXT: v_writelane_b32 v4, s54, 14
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[46:47], -1
; GFX9-NEXT: v_writelane_b32 v4, s55, 15
; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
@@ -39738,7 +40006,9 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr58
; GFX9-NEXT: ; implicit-def: $sgpr57
; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB69_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; GFX9-NEXT: s_cbranch_vccz .LBB69_2
+; GFX9-NEXT: s_branch .LBB69_3
;
; GFX11-LABEL: bitcast_v8i64_to_v64i8_scalar:
; GFX11: ; %bb.0:
@@ -39748,7 +40018,7 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v17, s30, 0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 vcc_lo, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: v_writelane_b32 v17, s31, 1
; GFX11-NEXT: v_writelane_b32 v17, s34, 2
; GFX11-NEXT: v_writelane_b32 v17, s35, 3
@@ -39759,6 +40029,7 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX11-NEXT: v_writelane_b32 v17, s48, 8
; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[26:27], 24
; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b32 s43, s27, 16
; GFX11-NEXT: s_lshr_b32 s44, s27, 8
@@ -39799,7 +40070,6 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s38, s1, 8
; GFX11-NEXT: s_lshr_b32 s39, s0, 16
; GFX11-NEXT: s_lshr_b32 s48, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[26:27], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[22:23], 24
; GFX11-NEXT: s_lshr_b64 s[10:11], s[20:21], 24
@@ -39807,8 +40077,7 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[28:29], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[40:41], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-NEXT: s_cbranch_vccnz .LBB69_3
+; GFX11-NEXT: s_cbranch_execnz .LBB69_3
; GFX11-NEXT: .LBB69_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
@@ -40095,7 +40364,9 @@ define inreg <64 x i8> @bitcast_v8i64_to_v64i8_scalar(<8 x i64> inreg %a, i32 in
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB69_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB69_2
+; GFX11-NEXT: s_branch .LBB69_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -42629,13 +42900,13 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:36
@@ -42646,8 +42917,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
@@ -42664,23 +42936,22 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v25
; SI-NEXT: v_lshlrev_b32_e32 v45, 8, v27
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v29
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2
; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v50
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39
+; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v37
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43
; SI-NEXT: s_waitcnt vmcnt(4)
@@ -42688,7 +42959,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: s_cbranch_scc0 .LBB71_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -42743,7 +43014,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_or_b32_e32 v10, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_or_b32_e32 v0, v0, v23
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -42752,14 +43023,12 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
-; SI-NEXT: v_mov_b32_e32 v24, v26
-; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v22, v46
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -42774,8 +43043,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_or_b32_e32 v1, v13, v1
; SI-NEXT: v_mov_b32_e32 v62, v58
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v46, v45
+; SI-NEXT: v_mov_b32_e32 v47, v45
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
@@ -42788,19 +43056,22 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v1
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: v_or_b32_e32 v1, v42, v1
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
; SI-NEXT: v_or_b32_e32 v0, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_mov_b32_e32 v30, v48
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v24, v26
+; SI-NEXT: v_mov_b32_e32 v26, v28
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v1
; SI-NEXT: s_and_b32 s4, s28, 0xff
@@ -42861,7 +43132,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: v_or_b32_e32 v1, v50, v1
; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -42896,9 +43167,8 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -42942,7 +43212,8 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
; SI-NEXT: s_add_i32 s5, s5, 0x3000000
; SI-NEXT: s_add_i32 s6, s6, 0x3000000
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -42959,32 +43230,34 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22
-; SI-NEXT: v_or_b32_e32 v0, v47, v0
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v22, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26
-; SI-NEXT: v_or_b32_e32 v0, v46, v0
+; SI-NEXT: v_or_b32_e32 v0, v47, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: v_or_b32_e32 v1, v46, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -42993,9 +43266,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_or_b32_e32 v1, v25, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38
; SI-NEXT: v_or_b32_e32 v0, v23, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -43018,18 +43291,17 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54
-; SI-NEXT: v_or_b32_e32 v0, v48, v0
+; SI-NEXT: v_or_b32_e32 v0, v28, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v48, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -43040,7 +43312,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: .LBB71_3: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
@@ -43061,23 +43332,22 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB71_4:
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_mov_b32_e32 v43, v6
; SI-NEXT: v_mov_b32_e32 v29, v8
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_mov_b32_e32 v36, v12
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
; SI-NEXT: v_mov_b32_e32 v24, v26
; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v30, v48
; SI-NEXT: v_mov_b32_e32 v39, v40
; SI-NEXT: v_mov_b32_e32 v41, v3
; SI-NEXT: v_mov_b32_e32 v40, v5
@@ -43087,20 +43357,22 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; SI-NEXT: v_mov_b32_e32 v57, v7
; SI-NEXT: v_mov_b32_e32 v59, v56
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
+; SI-NEXT: v_mov_b32_e32 v22, v46
; SI-NEXT: v_mov_b32_e32 v56, v9
-; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v47, v45
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
; SI-NEXT: v_mov_b32_e32 v19, v17
; SI-NEXT: v_mov_b32_e32 v17, v13
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB71_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB71_2
+; SI-NEXT: s_branch .LBB71_3
;
; VI-LABEL: bitcast_v64i8_to_v8i64_scalar:
; VI: ; %bb.0:
@@ -43122,16 +43394,16 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v37, v30
-; VI-NEXT: v_mov_b32_e32 v61, v28
+; VI-NEXT: v_mov_b32_e32 v32, v28
; VI-NEXT: v_mov_b32_e32 v31, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -43144,8 +43416,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -43161,12 +43434,12 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35
@@ -43182,49 +43455,48 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; VI-NEXT: s_cbranch_scc0 .LBB71_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v35, v4
+; VI-NEXT: v_mov_b32_e32 v36, v4
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v44, v2
+; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v63, v59
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v48
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v34, v39
+; VI-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v45, v25
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v43, v12
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -43234,8 +43506,8 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v26, v32
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
; VI-NEXT: v_mov_b32_e32 v19, v17
@@ -43245,17 +43517,18 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: v_mov_b32_e32 v29, v33
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v29, v61
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -43300,11 +43573,11 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v35
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v36
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -43322,11 +43595,13 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
@@ -43367,17 +43642,15 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
; VI-NEXT: s_add_i32 s6, s6, 0x3000000
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -43400,15 +43673,15 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
-; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62
+; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -43421,7 +43694,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -43456,34 +43729,34 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB71_4:
-; VI-NEXT: v_mov_b32_e32 v44, v2
-; VI-NEXT: v_mov_b32_e32 v34, v39
-; VI-NEXT: v_mov_b32_e32 v35, v4
-; VI-NEXT: v_mov_b32_e32 v29, v33
+; VI-NEXT: v_mov_b32_e32 v34, v48
+; VI-NEXT: v_mov_b32_e32 v35, v2
+; VI-NEXT: v_mov_b32_e32 v36, v4
+; VI-NEXT: v_mov_b32_e32 v29, v61
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_mov_b32_e32 v43, v12
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_mov_b32_e32 v16, v18
; VI-NEXT: v_mov_b32_e32 v18, v20
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
+; VI-NEXT: v_mov_b32_e32 v26, v32
; VI-NEXT: v_mov_b32_e32 v30, v37
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_mov_b32_e32 v45, v25
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
@@ -43491,10 +43764,12 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; VI-NEXT: v_mov_b32_e32 v17, v13
; VI-NEXT: v_mov_b32_e32 v37, v27
; VI-NEXT: v_mov_b32_e32 v27, v42
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB71_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB71_2
+; VI-NEXT: s_branch .LBB71_3
;
; GFX9-LABEL: bitcast_v64i8_to_v8i64_scalar:
; GFX9: ; %bb.0:
@@ -43516,16 +43791,16 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v37, v30
-; GFX9-NEXT: v_mov_b32_e32 v61, v28
+; GFX9-NEXT: v_mov_b32_e32 v32, v28
; GFX9-NEXT: v_mov_b32_e32 v31, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -43538,8 +43813,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -43555,14 +43831,14 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(22)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_waitcnt vmcnt(21)
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; GFX9-NEXT: s_waitcnt vmcnt(20)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; GFX9-NEXT: s_waitcnt vmcnt(16)
@@ -43581,49 +43857,48 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v63, v59
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v45, v25
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v43, v12
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -43633,8 +43908,8 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
; GFX9-NEXT: v_mov_b32_e32 v19, v17
@@ -43644,17 +43919,18 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s4, s4, s5
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
@@ -43698,11 +43974,11 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: s_lshl_b32 s6, s29, 8
; GFX9-NEXT: s_or_b32 s5, s6, s5
; GFX9-NEXT: v_add_u32_e32 v0, 3, v31
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v44
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v36
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s5, 0x300
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
@@ -43721,12 +43997,14 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
; GFX9-NEXT: v_add_u32_e32 v1, 3, v43
; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v44
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_and_b32 s5, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s17, 8
@@ -43767,18 +44045,16 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 3, v16
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 3, v18
-; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -43800,14 +44076,14 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v62
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v33
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v60
; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
@@ -43822,7 +44098,7 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v53
; GFX9-NEXT: v_add_u32_e32 v1, 3, v52
-; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -43856,34 +44132,34 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB71_4:
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_mov_b32_e32 v43, v12
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_mov_b32_e32 v16, v18
; GFX9-NEXT: v_mov_b32_e32 v18, v20
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
; GFX9-NEXT: v_mov_b32_e32 v30, v37
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_mov_b32_e32 v45, v25
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
@@ -43891,10 +44167,12 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX9-NEXT: v_mov_b32_e32 v17, v13
; GFX9-NEXT: v_mov_b32_e32 v37, v27
; GFX9-NEXT: v_mov_b32_e32 v27, v42
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB71_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB71_2
+; GFX9-NEXT: s_branch .LBB71_3
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v8i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -43935,7 +44213,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
@@ -43954,67 +44231,68 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB71_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -44080,10 +44358,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB71_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB71_3
; GFX11-TRUE16-NEXT: .LBB71_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -44279,7 +44556,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB71_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB71_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB71_2
+; GFX11-TRUE16-NEXT: s_branch .LBB71_3
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v8i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -44320,7 +44599,6 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(13)
@@ -44339,67 +44617,68 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB71_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -44465,10 +44744,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB71_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB71_3
; GFX11-FAKE16-NEXT: .LBB71_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -44664,7 +44942,9 @@ define inreg <8 x i64> @bitcast_v64i8_to_v8i64_scalar(<64 x i8> inreg %a, i32 in
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB71_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB71_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB71_2
+; GFX11-FAKE16-NEXT: s_branch .LBB71_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -44870,6 +45150,7 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v33, v1
; SI-NEXT: v_mov_b32_e32 v32, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -44885,8 +45166,8 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v20, s26
; SI-NEXT: v_mov_b32_e32 v21, s27
; SI-NEXT: v_mov_b32_e32 v24, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB73_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_alignbit_b32 v29, v33, v32, 16
@@ -44966,13 +45247,16 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB73_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB73_2
+; SI-NEXT: s_branch .LBB73_3
;
; VI-LABEL: bitcast_v8f64_to_v32i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -44988,12 +45272,15 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB73_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_3
-; VI-NEXT: .LBB73_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB73_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB73_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -45002,16 +45289,15 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB73_3: ; %end
+; VI-NEXT: .LBB73_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB73_4:
-; VI-NEXT: s_branch .LBB73_2
;
; GFX9-LABEL: bitcast_v8f64_to_v32i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -45027,12 +45313,15 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_3
-; GFX9-NEXT: .LBB73_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB73_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -45041,10 +45330,8 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB73_3: ; %end
+; GFX9-NEXT: .LBB73_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB73_4:
-; GFX9-NEXT: s_branch .LBB73_2
;
; GFX11-LABEL: bitcast_v8f64_to_v32i16_scalar:
; GFX11: ; %bb.0:
@@ -45054,12 +45341,15 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB73_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
-; GFX11-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -45069,8 +45359,6 @@ define inreg <32 x i16> @bitcast_v8f64_to_v32i16_scalar(<8 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -45423,6 +45711,7 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, v14
; SI-NEXT: v_mov_b32_e32 v25, v12
; SI-NEXT: v_mov_b32_e32 v19, v10
@@ -45431,7 +45720,7 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v22, v4
; SI-NEXT: v_mov_b32_e32 v23, v2
; SI-NEXT: v_mov_b32_e32 v24, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v5
@@ -45573,19 +45862,25 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB75_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB75_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB75_2
+; SI-NEXT: s_branch .LBB75_3
;
; VI-LABEL: bitcast_v32i16_to_v8f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: v_readfirstlane_b32 s7, v1
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB75_3
-; VI-NEXT: .LBB75_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB75_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB75_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s7, 3
; VI-NEXT: s_and_b32 s4, s7, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -45666,7 +45961,7 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB75_3: ; %end
+; VI-NEXT: .LBB75_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -45684,14 +45979,13 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, s6
; VI-NEXT: v_mov_b32_e32 v15, s7
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v32i16_to_v8f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -45707,12 +46001,15 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_3
-; GFX9-NEXT: .LBB75_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB75_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -45729,10 +46026,8 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB75_3: ; %end
+; GFX9-NEXT: .LBB75_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB75_4:
-; GFX9-NEXT: s_branch .LBB75_2
;
; GFX11-LABEL: bitcast_v32i16_to_v8f64_scalar:
; GFX11: ; %bb.0:
@@ -45742,12 +46037,15 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB75_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
-; GFX11-NEXT: .LBB75_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -45765,8 +46063,6 @@ define inreg <8 x double> @bitcast_v32i16_to_v8f64_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -46056,9 +46352,10 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s4, v0
; SI-NEXT: v_readfirstlane_b32 s5, v1
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB77_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s6, s5, 16
@@ -46202,13 +46499,16 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB77_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB77_2
+; SI-NEXT: s_branch .LBB77_3
;
; VI-LABEL: bitcast_v8f64_to_v32f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -46224,12 +46524,15 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB77_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_3
-; VI-NEXT: .LBB77_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB77_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB77_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -46238,16 +46541,15 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB77_3: ; %end
+; VI-NEXT: .LBB77_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB77_4:
-; VI-NEXT: s_branch .LBB77_2
;
; GFX9-LABEL: bitcast_v8f64_to_v32f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -46263,12 +46565,15 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_3
-; GFX9-NEXT: .LBB77_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB77_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -46277,10 +46582,8 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB77_3: ; %end
+; GFX9-NEXT: .LBB77_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB77_4:
-; GFX9-NEXT: s_branch .LBB77_2
;
; GFX11-LABEL: bitcast_v8f64_to_v32f16_scalar:
; GFX11: ; %bb.0:
@@ -46290,12 +46593,15 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB77_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
-; GFX11-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -46305,8 +46611,6 @@ define inreg <32 x half> @bitcast_v8f64_to_v32f16_scalar(<8 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -46779,6 +47083,7 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v35, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB79_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v51
@@ -46949,13 +47254,16 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB79_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB79_2
+; SI-NEXT: s_branch .LBB79_3
;
; VI-LABEL: bitcast_v32f16_to_v8f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -46971,12 +47279,15 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB79_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_3
-; VI-NEXT: .LBB79_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB79_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB79_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v16, 0x200
; VI-NEXT: v_add_f16_sdwa v17, v15, v16 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
@@ -47026,16 +47337,15 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; VI-NEXT: v_add_f16_e32 v0, 0x200, v0
; VI-NEXT: v_or_b32_e32 v1, v1, v17
; VI-NEXT: v_or_b32_e32 v0, v0, v16
-; VI-NEXT: .LBB79_3: ; %end
+; VI-NEXT: .LBB79_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB79_4:
-; VI-NEXT: s_branch .LBB79_2
;
; GFX9-LABEL: bitcast_v32f16_to_v8f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -47051,12 +47361,15 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_3
-; GFX9-NEXT: .LBB79_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB79_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -47074,10 +47387,8 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX9-NEXT: v_pk_add_f16 v2, v2, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, v1, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, v0, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB79_3: ; %end
+; GFX9-NEXT: .LBB79_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB79_4:
-; GFX9-NEXT: s_branch .LBB79_2
;
; GFX11-LABEL: bitcast_v32f16_to_v8f64_scalar:
; GFX11: ; %bb.0:
@@ -47087,12 +47398,15 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB79_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
-; GFX11-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -47110,8 +47424,6 @@ define inreg <8 x double> @bitcast_v32f16_to_v8f64_scalar(<32 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -47369,6 +47681,7 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v54, s16
; SI-NEXT: v_mov_b32_e32 v55, s17
; SI-NEXT: v_mov_b32_e32 v52, s18
@@ -47382,8 +47695,8 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v36, s26
; SI-NEXT: v_mov_b32_e32 v37, s27
; SI-NEXT: v_mov_b32_e32 v34, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v35, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB81_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v1
@@ -47497,13 +47810,16 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB81_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB81_2
+; SI-NEXT: s_branch .LBB81_3
;
; VI-LABEL: bitcast_v8f64_to_v32bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -47519,12 +47835,15 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB81_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_3
-; VI-NEXT: .LBB81_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB81_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB81_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -47533,16 +47852,15 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB81_3: ; %end
+; VI-NEXT: .LBB81_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB81_4:
-; VI-NEXT: s_branch .LBB81_2
;
; GFX9-LABEL: bitcast_v8f64_to_v32bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -47558,12 +47876,15 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_3
-; GFX9-NEXT: .LBB81_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB81_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
@@ -47572,10 +47893,8 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB81_3: ; %end
+; GFX9-NEXT: .LBB81_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB81_4:
-; GFX9-NEXT: s_branch .LBB81_2
;
; GFX11-LABEL: bitcast_v8f64_to_v32bf16_scalar:
; GFX11: ; %bb.0:
@@ -47585,12 +47904,15 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB81_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
-; GFX11-NEXT: .LBB81_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[14:15], s[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[10:11], s[22:23], 1.0
@@ -47600,8 +47922,6 @@ define inreg <32 x bfloat> @bitcast_v8f64_to_v32bf16_scalar(<8 x double> inreg %
; GFX11-NEXT: v_add_f64 v[2:3], s[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[12:13], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -49039,6 +49359,7 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e32 v19, 1.0, v14
; SI-NEXT: v_mul_f32_e32 v17, 1.0, v17
; SI-NEXT: v_mul_f32_e32 v16, 1.0, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mul_f32_e64 v54, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v55, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v52, 1.0, s21
@@ -49187,7 +49508,9 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB83_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB83_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB83_2
+; SI-NEXT: s_branch .LBB83_3
;
; VI-LABEL: bitcast_v32bf16_to_v8f64_scalar:
; VI: ; %bb.0:
@@ -49195,16 +49518,20 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v19, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; VI-NEXT: v_writelane_b32 v19, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v19, s31, 1
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
-; VI-NEXT: .LBB83_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB83_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB83_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s31, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -49495,8 +49822,6 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v16, 16
; VI-NEXT: s_branch .LBB83_5
-; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -49529,16 +49854,20 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v20, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_writelane_b32 v20, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v20, s31, 1
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
-; GFX9-NEXT: .LBB83_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB83_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_and_b32 s4, s31, 0xffff0000
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -49846,8 +50175,6 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
; GFX9-NEXT: s_branch .LBB83_5
-; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -49882,12 +50209,15 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB83_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s1, s27, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s0, s27, 16
; GFX11-NEXT: v_add_f32_e64 v1, 0x40c00000, s1
@@ -50216,8 +50546,6 @@ define inreg <8 x double> @bitcast_v32bf16_to_v8f64_scalar(<32 x bfloat> inreg %
; GFX11-NEXT: v_lshl_or_b32 v2, v16, 16, v21
; GFX11-NEXT: v_lshl_or_b32 v0, v20, 16, v17
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
; GFX11-NEXT: .LBB83_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -51907,9 +52235,10 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
-; SI-NEXT: v_readfirstlane_b32 s4, v1
; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: v_readfirstlane_b32 s4, v1
; SI-NEXT: v_readfirstlane_b32 s5, v2
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB85_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s4
@@ -52084,7 +52413,8 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr17
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_branch .LBB85_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB85_2
; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v1, s4
; SI-NEXT: v_mov_b32_e32 v3, s28
@@ -52316,8 +52646,9 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_writelane_b32 v40, s66, 18
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: s_mov_b64 s[46:47], -1
; VI-NEXT: v_writelane_b32 v40, s67, 19
; VI-NEXT: s_cbranch_scc0 .LBB85_3
; VI-NEXT: ; %bb.1: ; %cmp.false
@@ -52485,7 +52816,8 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr58
; VI-NEXT: ; implicit-def: $sgpr57
; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB85_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; VI-NEXT: s_cbranch_vccz .LBB85_2
; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v13, s16
; VI-NEXT: v_mov_b32_e32 v9, s18
@@ -52720,8 +53052,9 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; GFX9-NEXT: v_writelane_b32 v40, s54, 14
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[46:47], -1
; GFX9-NEXT: v_writelane_b32 v40, s55, 15
; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
@@ -52889,7 +53222,8 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr58
; GFX9-NEXT: ; implicit-def: $sgpr57
; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB85_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; GFX9-NEXT: s_cbranch_vccz .LBB85_2
; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s16
; GFX9-NEXT: v_mov_b32_e32 v11, s18
@@ -53090,7 +53424,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v33, s30, 0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s90, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: v_writelane_b32 v33, s31, 1
; GFX11-NEXT: v_writelane_b32 v33, s34, 2
; GFX11-NEXT: v_writelane_b32 v33, s35, 3
@@ -53099,49 +53433,49 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v33, s38, 6
; GFX11-NEXT: v_writelane_b32 v33, s39, 7
; GFX11-NEXT: v_writelane_b32 v33, s48, 8
-; GFX11-NEXT: v_writelane_b32 v33, s49, 9
; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s42, s27, 24
; GFX11-NEXT: s_lshr_b32 s43, s27, 16
; GFX11-NEXT: s_lshr_b32 s44, s27, 8
-; GFX11-NEXT: s_lshr_b32 s92, s26, 16
-; GFX11-NEXT: s_lshr_b32 s91, s26, 8
+; GFX11-NEXT: s_lshr_b32 s91, s26, 16
+; GFX11-NEXT: s_lshr_b32 s90, s26, 8
; GFX11-NEXT: s_lshr_b32 s45, s25, 24
; GFX11-NEXT: s_lshr_b32 s46, s25, 16
; GFX11-NEXT: s_lshr_b32 s47, s25, 8
-; GFX11-NEXT: s_lshr_b32 s94, s24, 16
-; GFX11-NEXT: s_lshr_b32 s93, s24, 8
+; GFX11-NEXT: s_lshr_b32 s93, s24, 16
+; GFX11-NEXT: s_lshr_b32 s92, s24, 8
; GFX11-NEXT: s_lshr_b32 s56, s23, 24
; GFX11-NEXT: s_lshr_b32 s57, s23, 16
; GFX11-NEXT: s_lshr_b32 s58, s23, 8
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s22, 16
-; GFX11-NEXT: s_lshr_b32 s95, s22, 8
+; GFX11-NEXT: s_lshr_b32 s95, s22, 16
+; GFX11-NEXT: s_lshr_b32 s94, s22, 8
; GFX11-NEXT: s_lshr_b32 s59, s21, 24
; GFX11-NEXT: s_lshr_b32 s60, s21, 16
; GFX11-NEXT: s_lshr_b32 s61, s21, 8
-; GFX11-NEXT: s_lshr_b32 s31, s20, 16
-; GFX11-NEXT: s_lshr_b32 s30, s20, 8
+; GFX11-NEXT: s_lshr_b32 s30, s20, 16
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s20, 8
; GFX11-NEXT: s_lshr_b32 s62, s19, 24
; GFX11-NEXT: s_lshr_b32 s63, s19, 16
; GFX11-NEXT: s_lshr_b32 s72, s19, 8
-; GFX11-NEXT: s_lshr_b32 s35, s18, 16
-; GFX11-NEXT: s_lshr_b32 s34, s18, 8
+; GFX11-NEXT: s_lshr_b32 s34, s18, 16
+; GFX11-NEXT: s_lshr_b32 s31, s18, 8
; GFX11-NEXT: s_lshr_b32 s73, s17, 24
; GFX11-NEXT: s_lshr_b32 s74, s17, 16
; GFX11-NEXT: s_lshr_b32 s75, s17, 8
-; GFX11-NEXT: s_lshr_b32 s37, s16, 16
-; GFX11-NEXT: s_lshr_b32 s36, s16, 8
+; GFX11-NEXT: s_lshr_b32 s36, s16, 16
+; GFX11-NEXT: s_lshr_b32 s35, s16, 8
; GFX11-NEXT: s_lshr_b32 s76, s3, 24
; GFX11-NEXT: s_lshr_b32 s77, s3, 16
; GFX11-NEXT: s_lshr_b32 s78, s3, 8
-; GFX11-NEXT: s_lshr_b32 s39, s2, 16
-; GFX11-NEXT: s_lshr_b32 s38, s2, 8
+; GFX11-NEXT: s_lshr_b32 s38, s2, 16
+; GFX11-NEXT: s_lshr_b32 s37, s2, 8
; GFX11-NEXT: s_lshr_b32 s79, s1, 24
; GFX11-NEXT: s_lshr_b32 s88, s1, 16
; GFX11-NEXT: s_lshr_b32 s89, s1, 8
-; GFX11-NEXT: s_lshr_b32 s49, s0, 16
-; GFX11-NEXT: s_lshr_b32 s48, s0, 8
+; GFX11-NEXT: s_lshr_b32 s48, s0, 16
+; GFX11-NEXT: s_lshr_b32 s39, s0, 8
; GFX11-NEXT: s_lshr_b64 s[40:41], s[26:27], 24
; GFX11-NEXT: s_lshr_b64 s[28:29], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[14:15], s[22:23], 24
@@ -53149,9 +53483,7 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_cbranch_execnz .LBB85_4
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[18:19], s[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], s[0:1], 1.0
@@ -53219,72 +53551,73 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s89, s1, 8
; GFX11-NEXT: s_branch .LBB85_5
; GFX11-NEXT: .LBB85_3:
+; GFX11-NEXT: ; implicit-def: $sgpr39
; GFX11-NEXT: ; implicit-def: $sgpr48
-; GFX11-NEXT: ; implicit-def: $sgpr49
; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr89
; GFX11-NEXT: ; implicit-def: $sgpr88
; GFX11-NEXT: ; implicit-def: $sgpr79
+; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr38
-; GFX11-NEXT: ; implicit-def: $sgpr39
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr78
; GFX11-NEXT: ; implicit-def: $sgpr77
; GFX11-NEXT: ; implicit-def: $sgpr76
+; GFX11-NEXT: ; implicit-def: $sgpr35
; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr75
; GFX11-NEXT: ; implicit-def: $sgpr74
; GFX11-NEXT: ; implicit-def: $sgpr73
+; GFX11-NEXT: ; implicit-def: $sgpr31
; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr35
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr72
; GFX11-NEXT: ; implicit-def: $sgpr63
; GFX11-NEXT: ; implicit-def: $sgpr62
+; GFX11-NEXT: ; implicit-def: $vcc_hi
; GFX11-NEXT: ; implicit-def: $sgpr30
-; GFX11-NEXT: ; implicit-def: $sgpr31
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr60
; GFX11-NEXT: ; implicit-def: $sgpr59
+; GFX11-NEXT: ; implicit-def: $sgpr94
; GFX11-NEXT: ; implicit-def: $sgpr95
-; GFX11-NEXT: ; implicit-def: $vcc_hi
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr58
; GFX11-NEXT: ; implicit-def: $sgpr57
; GFX11-NEXT: ; implicit-def: $sgpr56
+; GFX11-NEXT: ; implicit-def: $sgpr92
; GFX11-NEXT: ; implicit-def: $sgpr93
-; GFX11-NEXT: ; implicit-def: $sgpr94
; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr47
; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr45
+; GFX11-NEXT: ; implicit-def: $sgpr90
; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr92
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr43
; GFX11-NEXT: ; implicit-def: $sgpr42
-; GFX11-NEXT: s_branch .LBB85_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB85_2
; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v24, s0 :: v_dual_mov_b32 v3, s24
; GFX11-NEXT: v_dual_mov_b32 v22, s2 :: v_dual_mov_b32 v1, s26
; GFX11-NEXT: v_dual_mov_b32 v18, s16 :: v_dual_mov_b32 v27, s6
; GFX11-NEXT: v_dual_mov_b32 v14, s18 :: v_dual_mov_b32 v5, s40
-; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v31, s49
-; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v29, s39
-; GFX11-NEXT: v_dual_mov_b32 v28, s4 :: v_dual_mov_b32 v23, s37
-; GFX11-NEXT: v_dual_mov_b32 v26, s8 :: v_dual_mov_b32 v25, s36
-; GFX11-NEXT: v_dual_mov_b32 v20, s10 :: v_dual_mov_b32 v19, s35
-; GFX11-NEXT: v_dual_mov_b32 v16, s12 :: v_dual_mov_b32 v21, s34
-; GFX11-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s31
-; GFX11-NEXT: v_dual_mov_b32 v6, s28 :: v_dual_mov_b32 v17, s30
-; GFX11-NEXT: v_dual_mov_b32 v32, s48 :: v_dual_mov_b32 v11, vcc_hi
-; GFX11-NEXT: v_dual_mov_b32 v30, s38 :: v_dual_mov_b32 v13, s95
-; GFX11-NEXT: v_dual_mov_b32 v7, s94 :: v_dual_mov_b32 v2, s92
-; GFX11-NEXT: v_dual_mov_b32 v9, s93 :: v_dual_mov_b32 v4, s91
+; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v31, s48
+; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v29, s38
+; GFX11-NEXT: v_dual_mov_b32 v28, s4 :: v_dual_mov_b32 v23, s36
+; GFX11-NEXT: v_dual_mov_b32 v26, s8 :: v_dual_mov_b32 v25, s35
+; GFX11-NEXT: v_dual_mov_b32 v20, s10 :: v_dual_mov_b32 v19, s34
+; GFX11-NEXT: v_dual_mov_b32 v16, s12 :: v_dual_mov_b32 v21, s31
+; GFX11-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v15, s30
+; GFX11-NEXT: v_dual_mov_b32 v6, s28 :: v_dual_mov_b32 v17, vcc_hi
+; GFX11-NEXT: v_dual_mov_b32 v32, s39 :: v_dual_mov_b32 v11, s95
+; GFX11-NEXT: v_dual_mov_b32 v30, s37 :: v_dual_mov_b32 v13, s94
+; GFX11-NEXT: v_dual_mov_b32 v7, s93 :: v_dual_mov_b32 v2, s91
+; GFX11-NEXT: v_dual_mov_b32 v9, s92 :: v_dual_mov_b32 v4, s90
; GFX11-NEXT: .LBB85_5: ; %end
; GFX11-NEXT: s_and_b32 s0, s1, 0xff
; GFX11-NEXT: s_lshl_b32 s1, s89, 8
@@ -53442,7 +53775,6 @@ define inreg <64 x i8> @bitcast_v8f64_to_v64i8_scalar(<8 x double> inreg %a, i32
; GFX11-NEXT: scratch_store_b128 v0, v[14:17], off offset:16
; GFX11-NEXT: scratch_store_b128 v0, v[10:13], off offset:32
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
-; GFX11-NEXT: v_readlane_b32 s49, v33, 9
; GFX11-NEXT: v_readlane_b32 s48, v33, 8
; GFX11-NEXT: v_readlane_b32 s39, v33, 7
; GFX11-NEXT: v_readlane_b32 s38, v33, 6
@@ -55990,13 +56322,13 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:8
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:4
-; SI-NEXT: buffer_load_dword v51, off, s[0:3], s32 offset:16
; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:12
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:4
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:16
+; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:12
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:24
-; SI-NEXT: buffer_load_dword v49, off, s[0:3], s32 offset:20
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:32
+; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:20
+; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:36
@@ -56007,8 +56339,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:64
; SI-NEXT: buffer_load_dword v53, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v52, off, s[0:3], s32 offset:68
; SI-NEXT: v_lshlrev_b32_e32 v1, 24, v1
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
@@ -56025,23 +56358,22 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v25
; SI-NEXT: v_lshlrev_b32_e32 v45, 8, v27
; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v29
+; SI-NEXT: buffer_store_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v2
; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v51
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v50
; SI-NEXT: v_lshlrev_b32_e32 v19, 24, v39
+; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v37
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_lshlrev_b32_e32 v17, 8, v38
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v13, 24, v36
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_lshlrev_b32_e32 v51, 8, v30
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v42
-; SI-NEXT: buffer_store_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(6)
; SI-NEXT: v_lshlrev_b32_e32 v15, 8, v43
; SI-NEXT: s_waitcnt vmcnt(4)
@@ -56049,7 +56381,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_cbranch_scc0 .LBB87_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xff, v32
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v33
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -56104,7 +56436,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_or_b32_e32 v10, v0, v1
; SI-NEXT: v_and_b32_e32 v0, 0xff, v48
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v50
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v60
; SI-NEXT: v_or_b32_e32 v0, v0, v23
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -56113,14 +56445,12 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
-; SI-NEXT: v_mov_b32_e32 v24, v26
-; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v22, v46
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_or_b32_e32 v11, v0, v1
-; SI-NEXT: v_and_b32_e32 v0, 0xff, v60
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v0, 0xff, v49
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v38
; SI-NEXT: v_or_b32_e32 v0, v0, v21
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -56135,8 +56465,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v13, v1
; SI-NEXT: v_mov_b32_e32 v62, v58
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
-; SI-NEXT: v_mov_b32_e32 v46, v45
+; SI-NEXT: v_mov_b32_e32 v47, v45
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
@@ -56149,19 +56478,22 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v27, v1
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: v_or_b32_e32 v14, v0, v1
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v37
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: v_or_b32_e32 v1, v42, v1
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v0, 0xff, v53
; SI-NEXT: v_or_b32_e32 v0, v0, v15
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_mov_b32_e32 v30, v48
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v24, v26
+; SI-NEXT: v_mov_b32_e32 v26, v28
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: v_or_b32_e32 v15, v0, v1
; SI-NEXT: s_and_b32 s4, s28, 0xff
@@ -56222,7 +56554,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v33
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
-; SI-NEXT: v_or_b32_e32 v1, v38, v1
+; SI-NEXT: v_or_b32_e32 v1, v50, v1
; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -56257,9 +56589,8 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v62, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v6, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v52
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v37
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_or_b32_e32 v0, v61, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -56303,7 +56634,8 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_add_i32 s4, s4, 0x3000000
; SI-NEXT: s_add_i32 s5, s5, 0x3000000
; SI-NEXT: s_add_i32 s6, s6, 0x3000000
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_mov_b32_e32 v2, s6
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
@@ -56320,32 +56652,34 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v1, v58, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; SI-NEXT: v_add_i32_e32 v8, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v20
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v22
-; SI-NEXT: v_or_b32_e32 v0, v47, v0
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_or_b32_e32 v0, v22, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v9, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v24
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v26
-; SI-NEXT: v_or_b32_e32 v0, v46, v0
+; SI-NEXT: v_or_b32_e32 v0, v47, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v28, v1
+; SI-NEXT: v_or_b32_e32 v1, v46, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v30
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v50
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v60
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -56354,9 +56688,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_or_b32_e32 v1, v25, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v11, vcc, 0x3000000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v60
+; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v38
; SI-NEXT: v_or_b32_e32 v0, v23, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -56379,18 +56713,17 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v54
-; SI-NEXT: v_or_b32_e32 v0, v48, v0
+; SI-NEXT: v_or_b32_e32 v0, v28, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_or_b32_e32 v1, v48, v1
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_add_i32_e32 v14, vcc, 0x3000000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v53
; SI-NEXT: v_and_b32_e32 v0, 0xff, v0
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v37
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v52
; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
@@ -56401,7 +56734,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v15, vcc, 0x3000000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
-; SI-NEXT: v_mov_b32_e32 v2, s6
; SI-NEXT: .LBB87_3: ; %end
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
@@ -56422,23 +56754,22 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB87_4:
-; SI-NEXT: s_waitcnt expcnt(1)
+; SI-NEXT: v_mov_b32_e32 v30, v48
+; SI-NEXT: v_mov_b32_e32 v48, v27
; SI-NEXT: v_mov_b32_e32 v27, v42
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v38, v1
+; SI-NEXT: v_mov_b32_e32 v50, v1
; SI-NEXT: v_mov_b32_e32 v43, v6
; SI-NEXT: v_mov_b32_e32 v29, v8
; SI-NEXT: v_mov_b32_e32 v44, v10
; SI-NEXT: v_mov_b32_e32 v36, v12
-; SI-NEXT: v_mov_b32_e32 v52, v14
+; SI-NEXT: v_mov_b32_e32 v37, v14
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v16, v18
; SI-NEXT: v_mov_b32_e32 v18, v20
; SI-NEXT: v_mov_b32_e32 v20, v22
-; SI-NEXT: v_mov_b32_e32 v22, v24
; SI-NEXT: v_mov_b32_e32 v24, v26
; SI-NEXT: v_mov_b32_e32 v26, v28
-; SI-NEXT: v_mov_b32_e32 v30, v48
; SI-NEXT: v_mov_b32_e32 v39, v40
; SI-NEXT: v_mov_b32_e32 v41, v3
; SI-NEXT: v_mov_b32_e32 v40, v5
@@ -56448,20 +56779,22 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v57, v7
; SI-NEXT: v_mov_b32_e32 v59, v56
; SI-NEXT: v_mov_b32_e32 v58, v47
-; SI-NEXT: v_mov_b32_e32 v47, v46
+; SI-NEXT: v_mov_b32_e32 v22, v46
; SI-NEXT: v_mov_b32_e32 v56, v9
-; SI-NEXT: v_mov_b32_e32 v46, v45
-; SI-NEXT: v_mov_b32_e32 v28, v25
+; SI-NEXT: v_mov_b32_e32 v47, v45
+; SI-NEXT: v_mov_b32_e32 v46, v25
; SI-NEXT: v_mov_b32_e32 v45, v23
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v23, v21
; SI-NEXT: v_mov_b32_e32 v21, v19
; SI-NEXT: v_mov_b32_e32 v19, v17
; SI-NEXT: v_mov_b32_e32 v17, v13
-; SI-NEXT: v_mov_b32_e32 v48, v51
+; SI-NEXT: v_mov_b32_e32 v28, v51
; SI-NEXT: v_mov_b32_e32 v51, v15
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; SI-NEXT: s_branch .LBB87_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB87_2
+; SI-NEXT: s_branch .LBB87_3
;
; VI-LABEL: bitcast_v64i8_to_v8f64_scalar:
; VI: ; %bb.0:
@@ -56483,16 +56816,16 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; VI-NEXT: v_mov_b32_e32 v37, v30
-; VI-NEXT: v_mov_b32_e32 v61, v28
+; VI-NEXT: v_mov_b32_e32 v32, v28
; VI-NEXT: v_mov_b32_e32 v31, v0
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; VI-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; VI-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -56505,8 +56838,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; VI-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; VI-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; VI-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; VI-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; VI-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -56522,12 +56856,12 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
-; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; VI-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; VI-NEXT: v_lshlrev_b32_e32 v19, 8, v36
; VI-NEXT: v_lshlrev_b32_e32 v17, 8, v35
@@ -56543,49 +56877,48 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; VI-NEXT: s_cbranch_scc0 .LBB87_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v35, v4
+; VI-NEXT: v_mov_b32_e32 v36, v4
; VI-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v44, v2
+; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v63, v59
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v34, v48
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v34, v39
+; VI-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v45, v25
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v43, v12
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -56595,8 +56928,8 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v26, v32
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
; VI-NEXT: v_mov_b32_e32 v19, v17
@@ -56606,17 +56939,18 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_waitcnt vmcnt(3)
; VI-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: v_mov_b32_e32 v29, v33
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v29, v61
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -56661,11 +56995,11 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v31
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v44
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v35
; VI-NEXT: s_and_b32 s4, s4, 0xffff
-; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v35
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v36
; VI-NEXT: v_or_b32_e32 v0, s4, v0
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -56683,11 +57017,13 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v43
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
-; VI-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
-; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v44
+; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_and_b32 s4, s16, 0xff
; VI-NEXT: s_lshl_b32 s5, s17, 8
@@ -56728,17 +57064,15 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
; VI-NEXT: s_add_i32 s6, s6, 0x3000000
; VI-NEXT: v_mov_b32_e32 v2, s6
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
-; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v16
-; VI-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v18
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -56761,15 +57095,15 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v30
-; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v62
+; VI-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v60
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
; VI-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v29
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v60
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -56782,7 +57116,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v53
-; VI-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v52
; VI-NEXT: v_add_u32_e32 v0, vcc, 0x300, v0
; VI-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -56817,34 +57151,34 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB87_4:
-; VI-NEXT: v_mov_b32_e32 v44, v2
-; VI-NEXT: v_mov_b32_e32 v34, v39
-; VI-NEXT: v_mov_b32_e32 v35, v4
-; VI-NEXT: v_mov_b32_e32 v29, v33
+; VI-NEXT: v_mov_b32_e32 v34, v48
+; VI-NEXT: v_mov_b32_e32 v35, v2
+; VI-NEXT: v_mov_b32_e32 v36, v4
+; VI-NEXT: v_mov_b32_e32 v29, v61
; VI-NEXT: v_mov_b32_e32 v49, v6
; VI-NEXT: v_mov_b32_e32 v48, v8
; VI-NEXT: v_mov_b32_e32 v39, v10
; VI-NEXT: v_mov_b32_e32 v43, v12
+; VI-NEXT: v_mov_b32_e32 v44, v14
; VI-NEXT: v_mov_b32_e32 v16, v18
; VI-NEXT: v_mov_b32_e32 v18, v20
; VI-NEXT: v_mov_b32_e32 v20, v22
; VI-NEXT: v_mov_b32_e32 v22, v24
; VI-NEXT: v_mov_b32_e32 v24, v26
-; VI-NEXT: v_mov_b32_e32 v26, v61
+; VI-NEXT: v_mov_b32_e32 v26, v32
; VI-NEXT: v_mov_b32_e32 v30, v37
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v41, v5
; VI-NEXT: v_mov_b32_e32 v40, v3
; VI-NEXT: v_mov_b32_e32 v63, v59
-; VI-NEXT: v_mov_b32_e32 v36, v58
+; VI-NEXT: v_mov_b32_e32 v59, v58
; VI-NEXT: v_mov_b32_e32 v58, v57
; VI-NEXT: v_mov_b32_e32 v57, v7
-; VI-NEXT: v_mov_b32_e32 v59, v56
; VI-NEXT: v_mov_b32_e32 v56, v47
; VI-NEXT: v_mov_b32_e32 v47, v46
; VI-NEXT: v_mov_b32_e32 v46, v9
; VI-NEXT: v_mov_b32_e32 v45, v25
-; VI-NEXT: v_mov_b32_e32 v61, v23
+; VI-NEXT: v_mov_b32_e32 v32, v23
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v23, v21
; VI-NEXT: v_mov_b32_e32 v21, v19
@@ -56852,10 +57186,12 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v17, v13
; VI-NEXT: v_mov_b32_e32 v37, v27
; VI-NEXT: v_mov_b32_e32 v27, v42
-; VI-NEXT: v_mov_b32_e32 v33, v28
+; VI-NEXT: v_mov_b32_e32 v61, v28
; VI-NEXT: v_mov_b32_e32 v28, v15
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB87_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB87_2
+; VI-NEXT: s_branch .LBB87_3
;
; GFX9-LABEL: bitcast_v64i8_to_v8f64_scalar:
; GFX9: ; %bb.0:
@@ -56877,16 +57213,16 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; GFX9-NEXT: v_mov_b32_e32 v37, v30
-; GFX9-NEXT: v_mov_b32_e32 v61, v28
+; GFX9-NEXT: v_mov_b32_e32 v32, v28
; GFX9-NEXT: v_mov_b32_e32 v31, v0
; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
-; GFX9-NEXT: buffer_load_ushort v48, off, s[0:3], s32
+; GFX9-NEXT: buffer_load_ushort v39, off, s[0:3], s32
; GFX9-NEXT: buffer_load_ushort v28, off, s[0:3], s32 offset:8
-; GFX9-NEXT: buffer_load_ushort v62, off, s[0:3], s32 offset:4
+; GFX9-NEXT: buffer_load_ushort v61, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v36, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v60, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:32
; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:28
; GFX9-NEXT: buffer_load_ushort v34, off, s[0:3], s32 offset:40
@@ -56899,8 +57235,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:60
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:72
; GFX9-NEXT: buffer_load_ushort v50, off, s[0:3], s32 offset:68
-; GFX9-NEXT: v_lshlrev_b32_e32 v32, 8, v1
-; GFX9-NEXT: v_lshlrev_b32_e32 v39, 8, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_lshlrev_b32_e32 v62, 8, v1
+; GFX9-NEXT: v_lshlrev_b32_e32 v48, 8, v3
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v5
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v3, 8, v9
@@ -56916,14 +57253,14 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v25, 8, v29
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; GFX9-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
+; GFX9-NEXT: buffer_store_dword v56, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; GFX9-NEXT: s_waitcnt vmcnt(22)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; GFX9-NEXT: s_waitcnt vmcnt(21)
-; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; GFX9-NEXT: v_lshlrev_b32_e32 v23, 8, v39
; GFX9-NEXT: s_waitcnt vmcnt(20)
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(18)
; GFX9-NEXT: v_lshlrev_b32_e32 v21, 8, v38
; GFX9-NEXT: s_waitcnt vmcnt(16)
@@ -56942,49 +57279,48 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v42, 8, v44
; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: v_or_b32_sdwa v0, v2, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v2, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_or_b32_sdwa v1, v4, v1 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
; GFX9-NEXT: v_or_b32_sdwa v4, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v10, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v12, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v14, v57 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v16, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v63, v59
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v18, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_or_b32_sdwa v3, v8, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v22, v46 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v1, v24, v9 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v26, v45 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v61, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
+; GFX9-NEXT: v_or_b32_sdwa v1, v32, v25 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v37, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v62, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v61, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v45, v25
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v0, v60, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_or_b32_sdwa v1, v33, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v33, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v60, v19 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v43, v12
; GFX9-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v55, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -56994,8 +57330,8 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
; GFX9-NEXT: v_mov_b32_e32 v19, v17
@@ -57005,17 +57341,18 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v1, v52, v27 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s28, 0xff
; GFX9-NEXT: s_lshl_b32 s5, s29, 8
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v0, v51, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_waitcnt vmcnt(3)
; GFX9-NEXT: v_or_b32_sdwa v1, v50, v42 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s4, s4, s5
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: s_and_b32 s4, s4, 0xffff
-; GFX9-NEXT: v_or_b32_sdwa v0, v31, v32 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v31, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_or_b32_sdwa v5, v2, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_or_b32_e32 v3, s4, v0
@@ -57059,11 +57396,11 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_lshl_b32 s6, s29, 8
; GFX9-NEXT: s_or_b32 s5, s6, s5
; GFX9-NEXT: v_add_u32_e32 v0, 3, v31
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v44
-; GFX9-NEXT: v_add_u32_e32 v2, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v35
+; GFX9-NEXT: v_add_u32_e32 v2, 3, v36
; GFX9-NEXT: s_movk_i32 s4, 0x300
; GFX9-NEXT: s_addk_i32 s5, 0x300
-; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v62, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: s_and_b32 s5, s5, 0xffff
@@ -57082,12 +57419,14 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_add_u32_e32 v0, 3, v39
; GFX9-NEXT: v_add_u32_e32 v1, 3, v43
; GFX9-NEXT: v_or_b32_sdwa v0, v63, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_or_b32_sdwa v1, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v1, v59, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v44
+; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: s_add_i32 s16, s16, 3
; GFX9-NEXT: s_and_b32 s5, s16, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s17, 8
@@ -57128,18 +57467,16 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_lshl_b32 s8, s8, 16
; GFX9-NEXT: s_or_b32 s7, s7, s8
; GFX9-NEXT: v_mov_b32_e32 v2, s7
-; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
-; GFX9-NEXT: v_or_b32_sdwa v0, v58, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v57, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; GFX9-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; GFX9-NEXT: v_add_u32_e32 v0, 3, v16
+; GFX9-NEXT: s_waitcnt vmcnt(0)
+; GFX9-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v1, 3, v18
-; GFX9-NEXT: v_or_b32_sdwa v0, v59, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v56, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -57161,14 +57498,14 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v30
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v62
-; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_or_b32_sdwa v0, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v25, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; GFX9-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; GFX9-NEXT: v_add_u32_e32 v0, 3, v60
-; GFX9-NEXT: v_add_u32_e32 v1, 3, v29
+; GFX9-NEXT: v_add_u32_e32 v0, 3, v33
+; GFX9-NEXT: v_add_u32_e32 v1, 3, v60
; GFX9-NEXT: v_or_b32_sdwa v0, v23, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v21, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
@@ -57183,7 +57520,7 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; GFX9-NEXT: v_add_u32_e32 v0, 3, v53
; GFX9-NEXT: v_add_u32_e32 v1, 3, v52
-; GFX9-NEXT: v_or_b32_sdwa v0, v33, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v0, v61, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_or_b32_sdwa v1, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v0, 0x300, v0
; GFX9-NEXT: v_add_u32_sdwa v1, v1, s4 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
@@ -57217,34 +57554,34 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_waitcnt vmcnt(0)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB87_4:
-; GFX9-NEXT: v_mov_b32_e32 v44, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v39
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v29, v33
+; GFX9-NEXT: v_mov_b32_e32 v34, v48
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v4
+; GFX9-NEXT: v_mov_b32_e32 v29, v61
; GFX9-NEXT: v_mov_b32_e32 v49, v6
; GFX9-NEXT: v_mov_b32_e32 v48, v8
; GFX9-NEXT: v_mov_b32_e32 v39, v10
; GFX9-NEXT: v_mov_b32_e32 v43, v12
+; GFX9-NEXT: v_mov_b32_e32 v44, v14
; GFX9-NEXT: v_mov_b32_e32 v16, v18
; GFX9-NEXT: v_mov_b32_e32 v18, v20
; GFX9-NEXT: v_mov_b32_e32 v20, v22
; GFX9-NEXT: v_mov_b32_e32 v22, v24
; GFX9-NEXT: v_mov_b32_e32 v24, v26
-; GFX9-NEXT: v_mov_b32_e32 v26, v61
+; GFX9-NEXT: v_mov_b32_e32 v26, v32
; GFX9-NEXT: v_mov_b32_e32 v30, v37
; GFX9-NEXT: v_mov_b32_e32 v38, v1
; GFX9-NEXT: v_mov_b32_e32 v41, v5
; GFX9-NEXT: v_mov_b32_e32 v40, v3
; GFX9-NEXT: v_mov_b32_e32 v63, v59
-; GFX9-NEXT: v_mov_b32_e32 v36, v58
+; GFX9-NEXT: v_mov_b32_e32 v59, v58
; GFX9-NEXT: v_mov_b32_e32 v58, v57
; GFX9-NEXT: v_mov_b32_e32 v57, v7
-; GFX9-NEXT: v_mov_b32_e32 v59, v56
; GFX9-NEXT: v_mov_b32_e32 v56, v47
; GFX9-NEXT: v_mov_b32_e32 v47, v46
; GFX9-NEXT: v_mov_b32_e32 v46, v9
; GFX9-NEXT: v_mov_b32_e32 v45, v25
-; GFX9-NEXT: v_mov_b32_e32 v61, v23
+; GFX9-NEXT: v_mov_b32_e32 v32, v23
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v21, v19
@@ -57252,10 +57589,12 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v17, v13
; GFX9-NEXT: v_mov_b32_e32 v37, v27
; GFX9-NEXT: v_mov_b32_e32 v27, v42
-; GFX9-NEXT: v_mov_b32_e32 v33, v28
+; GFX9-NEXT: v_mov_b32_e32 v61, v28
; GFX9-NEXT: v_mov_b32_e32 v28, v15
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB87_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB87_2
+; GFX9-NEXT: s_branch .LBB87_3
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v8f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -57296,7 +57635,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
@@ -57315,67 +57653,68 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB87_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-TRUE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-TRUE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-TRUE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-TRUE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-TRUE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-TRUE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-TRUE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-TRUE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-TRUE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-TRUE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-TRUE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -57441,10 +57780,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-TRUE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-TRUE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB87_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB87_3
; GFX11-TRUE16-NEXT: .LBB87_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-TRUE16-NEXT: s_add_i32 s2, s2, 3
@@ -57640,7 +57978,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB87_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB87_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB87_2
+; GFX11-TRUE16-NEXT: s_branch .LBB87_3
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v8f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -57681,7 +58021,6 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v69, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v29, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v25, 8, v0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(13)
@@ -57700,67 +58039,68 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v14
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v23, 8, v86
-; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB87_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s4, 0xffff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s5, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s17, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_and_b32 s5, s16, 0xff
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
; GFX11-FAKE16-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s23, 8
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
; GFX11-FAKE16-NEXT: s_and_b32 s6, s6, 0xffff
; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s7, 16
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s8, s9, 0xffff
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s7, s8, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s27, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v31
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v32
-; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xff, v38
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v33
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v83
; GFX11-FAKE16-NEXT: v_or_b32_e32 v1, v1, v84
; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xff, v22
; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xff, v24
; GFX11-FAKE16-NEXT: v_or_b32_e32 v2, v2, v85
-; GFX11-FAKE16-NEXT: s_and_b32 s11, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s29, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s10, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s29, 8
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; GFX11-FAKE16-NEXT: v_or_b32_e32 v6, v6, v82
; GFX11-FAKE16-NEXT: v_or_b32_e32 v10, v10, v68
; GFX11-FAKE16-NEXT: v_or_b32_e32 v11, v11, v69
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s11, s12
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s10, s11
; GFX11-FAKE16-NEXT: v_or_b32_e32 v5, v1, v2
-; GFX11-FAKE16-NEXT: s_and_b32 s10, s10, 0xffff
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s9, 0xffff
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xff, v35
-; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s10, v0
+; GFX11-FAKE16-NEXT: v_or_b32_e32 v4, s9, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v34
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v36
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v37
@@ -57826,10 +58166,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_or_b32_e32 v13, v13, v87
; GFX11-FAKE16-NEXT: v_or_b32_e32 v14, v96, v14
; GFX11-FAKE16-NEXT: v_or_b32_e32 v15, v15, v86
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB87_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB87_3
; GFX11-FAKE16-NEXT: .LBB87_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_add_i32 s0, s0, 3
; GFX11-FAKE16-NEXT: s_add_i32 s2, s2, 3
@@ -58025,7 +58364,9 @@ define inreg <8 x double> @bitcast_v64i8_to_v8f64_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB87_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB87_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB87_2
+; GFX11-FAKE16-NEXT: s_branch .LBB87_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -58432,6 +58773,7 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_mov_b32_e32 v54, v17
@@ -58453,7 +58795,7 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v41, v2
; SI-NEXT: v_mov_b32_e32 v40, v1
; SI-NEXT: v_mov_b32_e32 v55, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB89_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -58592,19 +58934,25 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB89_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB89_2
+; SI-NEXT: s_branch .LBB89_3
;
; VI-LABEL: bitcast_v32i16_to_v32f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v1
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: v_readfirstlane_b32 s7, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v1
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB89_3
-; VI-NEXT: .LBB89_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB89_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB89_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s8, s17, 0xffff0000
@@ -58633,12 +58981,12 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s43, s29, 0xffff0000
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: s_and_b32 s44, s6, 0xffff0000
-; VI-NEXT: s_add_i32 s6, s6, 3
-; VI-NEXT: s_and_b32 s45, s7, 0xffff0000
+; VI-NEXT: s_and_b32 s44, s7, 0xffff0000
; VI-NEXT: s_add_i32 s7, s7, 3
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
+; VI-NEXT: s_and_b32 s45, s6, 0xffff0000
+; VI-NEXT: s_add_i32 s6, s6, 3
; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: s_and_b32 s7, s7, 0xffff
; VI-NEXT: s_and_b32 s29, s29, 0xffff
; VI-NEXT: s_and_b32 s28, s28, 0xffff
; VI-NEXT: s_and_b32 s27, s27, 0xffff
@@ -58653,8 +59001,8 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; VI-NEXT: s_and_b32 s11, s11, 0xffff
; VI-NEXT: s_and_b32 s9, s9, 0xffff
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_or_b32 s7, s45, s7
-; VI-NEXT: s_or_b32 s6, s44, s6
+; VI-NEXT: s_or_b32 s6, s45, s6
+; VI-NEXT: s_or_b32 s7, s44, s7
; VI-NEXT: s_or_b32 s29, s43, s29
; VI-NEXT: s_or_b32 s28, s42, s28
; VI-NEXT: s_or_b32 s27, s41, s27
@@ -58669,8 +59017,8 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; VI-NEXT: s_or_b32 s10, s10, s11
; VI-NEXT: s_or_b32 s8, s8, s9
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
+; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s29, s29, 0x30000
; VI-NEXT: s_add_i32 s28, s28, 0x30000
; VI-NEXT: s_add_i32 s27, s27, 0x30000
@@ -58685,7 +59033,7 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s18, s10, 0x30000
; VI-NEXT: s_add_i32 s17, s8, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB89_3: ; %end
+; VI-NEXT: .LBB89_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -58700,17 +59048,16 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: v_mov_b32_e32 v14, s6
-; VI-NEXT: v_mov_b32_e32 v15, s7
+; VI-NEXT: v_mov_b32_e32 v14, s7
+; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v32i16_to_v32f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -58726,12 +59073,15 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_3
-; GFX9-NEXT: .LBB89_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB89_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -58748,10 +59098,8 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB89_3: ; %end
+; GFX9-NEXT: .LBB89_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB89_4:
-; GFX9-NEXT: s_branch .LBB89_2
;
; GFX11-LABEL: bitcast_v32i16_to_v32f16_scalar:
; GFX11: ; %bb.0:
@@ -58761,12 +59109,15 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB89_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
-; GFX11-NEXT: .LBB89_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -58784,8 +59135,6 @@ define inreg <32 x half> @bitcast_v32i16_to_v32f16_scalar(<32 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -59181,10 +59530,14 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v12, s28
; SI-NEXT: v_cvt_f16_f32_e32 v13, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB91_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_3
-; SI-NEXT: .LBB91_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB91_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB91_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
@@ -59321,16 +59674,15 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v21, v22, v21, 16
; SI-NEXT: v_alignbit_b32 v25, v26, v25, 16
; SI-NEXT: v_alignbit_b32 v29, v30, v29, 16
-; SI-NEXT: .LBB91_3: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB91_4:
-; SI-NEXT: s_branch .LBB91_2
;
; VI-LABEL: bitcast_v32f16_to_v32i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -59346,12 +59698,15 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB91_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_3
-; VI-NEXT: .LBB91_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB91_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB91_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v17, 0x200
; VI-NEXT: v_add_f16_e32 v19, 0x200, v15
; VI-NEXT: v_add_f16_sdwa v15, v15, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
@@ -59401,16 +59756,15 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v2, v19, v2
; VI-NEXT: v_or_b32_e32 v1, v18, v1
; VI-NEXT: v_or_b32_e32 v0, v16, v0
-; VI-NEXT: .LBB91_3: ; %end
+; VI-NEXT: .LBB91_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB91_4:
-; VI-NEXT: s_branch .LBB91_2
;
; GFX9-LABEL: bitcast_v32f16_to_v32i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -59426,12 +59780,15 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_3
-; GFX9-NEXT: .LBB91_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB91_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -59449,10 +59806,8 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX9-NEXT: v_pk_add_f16 v2, v2, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, v1, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, v0, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB91_3: ; %end
+; GFX9-NEXT: .LBB91_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB91_4:
-; GFX9-NEXT: s_branch .LBB91_2
;
; GFX11-LABEL: bitcast_v32f16_to_v32i16_scalar:
; GFX11: ; %bb.0:
@@ -59462,12 +59817,15 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB91_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
-; GFX11-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -59485,8 +59843,6 @@ define inreg <32 x i16> @bitcast_v32f16_to_v32i16_scalar(<32 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -59857,12 +60213,13 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; SI-LABEL: bitcast_v32i16_to_v32bf16_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
; SI-NEXT: v_mov_b32_e32 v20, v17
; SI-NEXT: v_mov_b32_e32 v33, v16
; SI-NEXT: v_mov_b32_e32 v16, v15
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
-; SI-NEXT: v_mov_b32_e32 v32, v14
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v32, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v5
@@ -60042,19 +60399,25 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_branch .LBB93_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB93_2
+; SI-NEXT: s_branch .LBB93_3
;
; VI-LABEL: bitcast_v32i16_to_v32bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
-; VI-NEXT: v_readfirstlane_b32 s6, v0
; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v1
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: v_readfirstlane_b32 s7, v0
+; VI-NEXT: v_readfirstlane_b32 s6, v1
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB93_3
-; VI-NEXT: .LBB93_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB93_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB93_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_and_b32 s8, s17, 0xffff0000
@@ -60083,12 +60446,12 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s43, s29, 0xffff0000
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: s_and_b32 s44, s6, 0xffff0000
-; VI-NEXT: s_add_i32 s6, s6, 3
-; VI-NEXT: s_and_b32 s45, s7, 0xffff0000
+; VI-NEXT: s_and_b32 s44, s7, 0xffff0000
; VI-NEXT: s_add_i32 s7, s7, 3
-; VI-NEXT: s_and_b32 s7, s7, 0xffff
+; VI-NEXT: s_and_b32 s45, s6, 0xffff0000
+; VI-NEXT: s_add_i32 s6, s6, 3
; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: s_and_b32 s7, s7, 0xffff
; VI-NEXT: s_and_b32 s29, s29, 0xffff
; VI-NEXT: s_and_b32 s28, s28, 0xffff
; VI-NEXT: s_and_b32 s27, s27, 0xffff
@@ -60103,8 +60466,8 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; VI-NEXT: s_and_b32 s11, s11, 0xffff
; VI-NEXT: s_and_b32 s9, s9, 0xffff
; VI-NEXT: s_and_b32 s5, s5, 0xffff
-; VI-NEXT: s_or_b32 s7, s45, s7
-; VI-NEXT: s_or_b32 s6, s44, s6
+; VI-NEXT: s_or_b32 s6, s45, s6
+; VI-NEXT: s_or_b32 s7, s44, s7
; VI-NEXT: s_or_b32 s29, s43, s29
; VI-NEXT: s_or_b32 s28, s42, s28
; VI-NEXT: s_or_b32 s27, s41, s27
@@ -60119,8 +60482,8 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; VI-NEXT: s_or_b32 s10, s10, s11
; VI-NEXT: s_or_b32 s8, s8, s9
; VI-NEXT: s_or_b32 s4, s4, s5
-; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
+; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s29, s29, 0x30000
; VI-NEXT: s_add_i32 s28, s28, 0x30000
; VI-NEXT: s_add_i32 s27, s27, 0x30000
@@ -60135,7 +60498,7 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; VI-NEXT: s_add_i32 s18, s10, 0x30000
; VI-NEXT: s_add_i32 s17, s8, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB93_3: ; %end
+; VI-NEXT: .LBB93_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
@@ -60150,17 +60513,16 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: v_mov_b32_e32 v14, s6
-; VI-NEXT: v_mov_b32_e32 v15, s7
+; VI-NEXT: v_mov_b32_e32 v14, s7
+; VI-NEXT: v_mov_b32_e32 v15, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v32i16_to_v32bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -60176,12 +60538,15 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_3
-; GFX9-NEXT: .LBB93_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB93_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, v13, 3 op_sel_hi:[1,0]
@@ -60198,10 +60563,8 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX9-NEXT: v_pk_add_u16 v2, v2, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, v1, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, v0, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB93_3: ; %end
+; GFX9-NEXT: .LBB93_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB93_4:
-; GFX9-NEXT: s_branch .LBB93_2
;
; GFX11-LABEL: bitcast_v32i16_to_v32bf16_scalar:
; GFX11: ; %bb.0:
@@ -60211,12 +60574,15 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB93_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
-; GFX11-NEXT: .LBB93_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v15, s27, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s26, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s25, 3 op_sel_hi:[1,0]
@@ -60234,8 +60600,6 @@ define inreg <32 x bfloat> @bitcast_v32i16_to_v32bf16_scalar(<32 x i16> inreg %a
; GFX11-NEXT: v_pk_add_u16 v1, s13, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -61706,6 +62070,7 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -61722,7 +62087,6 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: v_mul_f32_e64 v57, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v56, 1.0, s17
@@ -61744,6 +62108,7 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; SI-NEXT: v_mul_f32_e32 v40, 1.0, v15
; SI-NEXT: v_mul_f32_e32 v55, 1.0, v16
; SI-NEXT: v_mul_f32_e32 v54, 1.0, v17
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mul_f32_e64 v33, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v32, 1.0, s19
; SI-NEXT: s_waitcnt expcnt(0)
@@ -61957,7 +62322,9 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB95_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB95_2
+; SI-NEXT: s_branch .LBB95_3
;
; VI-LABEL: bitcast_v32bf16_to_v32i16_scalar:
; VI: ; %bb.0:
@@ -61965,16 +62332,20 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v20, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; VI-NEXT: v_writelane_b32 v20, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v20, s31, 1
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
-; VI-NEXT: .LBB95_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB95_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB95_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v1
@@ -62265,8 +62636,6 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; VI-NEXT: v_alignbit_b32 v1, v1, v17, 16
; VI-NEXT: v_alignbit_b32 v0, v16, v0, 16
; VI-NEXT: s_branch .LBB95_5
-; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -62299,16 +62668,20 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v20, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_writelane_b32 v20, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v20, s31, 1
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
-; GFX9-NEXT: .LBB95_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB95_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: s_and_b32 s5, s30, 0xffff0000
; GFX9-NEXT: v_add_f32_e32 v1, s5, v0
@@ -62600,8 +62973,6 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; GFX9-NEXT: v_and_or_b32 v0, v17, v16, v0
; GFX9-NEXT: s_branch .LBB95_5
-; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -62636,12 +63007,15 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB95_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB95_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s1, s12, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
@@ -62935,8 +63309,6 @@ define inreg <32 x i16> @bitcast_v32bf16_to_v32i16_scalar(<32 x bfloat> inreg %a
; GFX11-NEXT: v_and_or_b32 v1, 0xffff0000, v16, v23
; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v24
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
; GFX11-NEXT: .LBB95_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -65206,8 +65578,9 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_writelane_b32 v37, s30, 0
; SI-NEXT: v_writelane_b32 v37, s31, 1
-; SI-NEXT: v_writelane_b32 v37, s34, 2
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: v_writelane_b32 v37, s34, 2
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_writelane_b32 v37, s35, 3
; SI-NEXT: v_readfirstlane_b32 s34, v18
; SI-NEXT: v_readfirstlane_b32 s35, v17
@@ -65219,7 +65592,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s93, v5
; SI-NEXT: v_readfirstlane_b32 s90, v2
; SI-NEXT: v_readfirstlane_b32 s91, v1
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v8
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v12
@@ -65729,7 +66102,9 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr10
-; SI-NEXT: s_branch .LBB97_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB97_2
+; SI-NEXT: s_branch .LBB97_3
;
; VI-LABEL: bitcast_v32i16_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -65758,8 +66133,9 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_writelane_b32 v4, s66, 18
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: s_mov_b64 s[46:47], -1
; VI-NEXT: v_writelane_b32 v4, s67, 19
; VI-NEXT: s_cbranch_scc0 .LBB97_4
; VI-NEXT: ; %bb.1: ; %cmp.false
@@ -66207,7 +66583,9 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr58
; VI-NEXT: ; implicit-def: $sgpr57
; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB97_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; VI-NEXT: s_cbranch_vccz .LBB97_2
+; VI-NEXT: s_branch .LBB97_3
;
; GFX9-LABEL: bitcast_v32i16_to_v64i8_scalar:
; GFX9: ; %bb.0:
@@ -66233,8 +66611,9 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; GFX9-NEXT: v_writelane_b32 v63, s55, 15
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[46:47], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -66422,7 +66801,8 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr59
; GFX9-NEXT: ; implicit-def: $sgpr57
; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB97_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; GFX9-NEXT: s_cbranch_vccz .LBB97_2
; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v21, s44
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -66642,7 +67022,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v40, s30, 0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s42, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: v_writelane_b32 v40, s31, 1
; GFX11-NEXT: v_writelane_b32 v40, s34, 2
; GFX11-NEXT: v_writelane_b32 v40, s35, 3
@@ -66651,49 +67031,49 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v40, s38, 6
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-NEXT: v_writelane_b32 v40, s48, 8
-; GFX11-NEXT: v_writelane_b32 v40, s49, 9
; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s43, s27, 24
-; GFX11-NEXT: s_lshr_b32 s44, s27, 16
-; GFX11-NEXT: s_lshr_b32 s46, s27, 8
-; GFX11-NEXT: s_lshr_b32 s45, s26, 16
-; GFX11-NEXT: s_lshr_b32 s47, s26, 8
-; GFX11-NEXT: s_lshr_b32 s56, s25, 24
-; GFX11-NEXT: s_lshr_b32 s57, s25, 16
-; GFX11-NEXT: s_lshr_b32 s59, s25, 8
-; GFX11-NEXT: s_lshr_b32 s58, s24, 16
-; GFX11-NEXT: s_lshr_b32 s60, s24, 8
-; GFX11-NEXT: s_lshr_b32 s61, s23, 24
-; GFX11-NEXT: s_lshr_b32 s62, s23, 16
-; GFX11-NEXT: s_lshr_b32 s72, s23, 8
-; GFX11-NEXT: s_lshr_b32 s63, s22, 16
-; GFX11-NEXT: s_lshr_b32 s73, s22, 8
-; GFX11-NEXT: s_lshr_b32 s74, s21, 24
-; GFX11-NEXT: s_lshr_b32 s75, s21, 16
-; GFX11-NEXT: s_lshr_b32 s77, s21, 8
-; GFX11-NEXT: s_lshr_b32 s76, s20, 16
-; GFX11-NEXT: s_lshr_b32 s78, s20, 8
-; GFX11-NEXT: s_lshr_b32 s79, s19, 24
-; GFX11-NEXT: s_lshr_b32 s88, s19, 16
-; GFX11-NEXT: s_lshr_b32 s90, s19, 8
-; GFX11-NEXT: s_lshr_b32 s89, s18, 16
-; GFX11-NEXT: s_lshr_b32 s91, s18, 8
-; GFX11-NEXT: s_lshr_b32 s92, s17, 24
-; GFX11-NEXT: s_lshr_b32 s93, s17, 16
-; GFX11-NEXT: s_lshr_b32 s95, s17, 8
-; GFX11-NEXT: s_lshr_b32 s94, s16, 16
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s16, 8
-; GFX11-NEXT: s_lshr_b32 s30, s3, 24
-; GFX11-NEXT: s_lshr_b32 s31, s3, 16
-; GFX11-NEXT: s_lshr_b32 s35, s3, 8
-; GFX11-NEXT: s_lshr_b32 s34, s2, 16
-; GFX11-NEXT: s_lshr_b32 s36, s2, 8
-; GFX11-NEXT: s_lshr_b32 s37, s1, 24
-; GFX11-NEXT: s_lshr_b32 s38, s1, 16
-; GFX11-NEXT: s_lshr_b32 s48, s1, 8
-; GFX11-NEXT: s_lshr_b32 s39, s0, 16
-; GFX11-NEXT: s_lshr_b32 s49, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s42, s27, 24
+; GFX11-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-NEXT: s_lshr_b32 s45, s27, 8
+; GFX11-NEXT: s_lshr_b32 s44, s26, 16
+; GFX11-NEXT: s_lshr_b32 s46, s26, 8
+; GFX11-NEXT: s_lshr_b32 s47, s25, 24
+; GFX11-NEXT: s_lshr_b32 s56, s25, 16
+; GFX11-NEXT: s_lshr_b32 s58, s25, 8
+; GFX11-NEXT: s_lshr_b32 s57, s24, 16
+; GFX11-NEXT: s_lshr_b32 s59, s24, 8
+; GFX11-NEXT: s_lshr_b32 s60, s23, 24
+; GFX11-NEXT: s_lshr_b32 s61, s23, 16
+; GFX11-NEXT: s_lshr_b32 s63, s23, 8
+; GFX11-NEXT: s_lshr_b32 s62, s22, 16
+; GFX11-NEXT: s_lshr_b32 s72, s22, 8
+; GFX11-NEXT: s_lshr_b32 s73, s21, 24
+; GFX11-NEXT: s_lshr_b32 s74, s21, 16
+; GFX11-NEXT: s_lshr_b32 s76, s21, 8
+; GFX11-NEXT: s_lshr_b32 s75, s20, 16
+; GFX11-NEXT: s_lshr_b32 s77, s20, 8
+; GFX11-NEXT: s_lshr_b32 s78, s19, 24
+; GFX11-NEXT: s_lshr_b32 s79, s19, 16
+; GFX11-NEXT: s_lshr_b32 s89, s19, 8
+; GFX11-NEXT: s_lshr_b32 s88, s18, 16
+; GFX11-NEXT: s_lshr_b32 s90, s18, 8
+; GFX11-NEXT: s_lshr_b32 s91, s17, 24
+; GFX11-NEXT: s_lshr_b32 s92, s17, 16
+; GFX11-NEXT: s_lshr_b32 s94, s17, 8
+; GFX11-NEXT: s_lshr_b32 s93, s16, 16
+; GFX11-NEXT: s_lshr_b32 s95, s16, 8
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s3, 24
+; GFX11-NEXT: s_lshr_b32 s30, s3, 16
+; GFX11-NEXT: s_lshr_b32 s34, s3, 8
+; GFX11-NEXT: s_lshr_b32 s31, s2, 16
+; GFX11-NEXT: s_lshr_b32 s35, s2, 8
+; GFX11-NEXT: s_lshr_b32 s36, s1, 24
+; GFX11-NEXT: s_lshr_b32 s37, s1, 16
+; GFX11-NEXT: s_lshr_b32 s39, s1, 8
+; GFX11-NEXT: s_lshr_b32 s38, s0, 16
+; GFX11-NEXT: s_lshr_b32 s48, s0, 8
; GFX11-NEXT: s_lshr_b64 s[40:41], s[26:27], 24
; GFX11-NEXT: s_lshr_b64 s[28:29], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[14:15], s[22:23], 24
@@ -66701,9 +67081,7 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_cbranch_execnz .LBB97_4
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v14, s19, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v13, s18, 3 op_sel_hi:[1,0]
@@ -66771,55 +67149,56 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v96, 8, v23
; GFX11-NEXT: s_branch .LBB97_5
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr48
; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr39
; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr35
; GFX11-NEXT: ; implicit-def: $sgpr31
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr34
; GFX11-NEXT: ; implicit-def: $sgpr30
; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr95
; GFX11-NEXT: ; implicit-def: $sgpr93
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr94
; GFX11-NEXT: ; implicit-def: $sgpr92
; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr90
; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr89
; GFX11-NEXT: ; implicit-def: $sgpr79
; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr77
; GFX11-NEXT: ; implicit-def: $sgpr75
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr76
; GFX11-NEXT: ; implicit-def: $sgpr74
; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr72
; GFX11-NEXT: ; implicit-def: $sgpr62
+; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: ; implicit-def: $sgpr63
; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr57
+; GFX11-NEXT: ; implicit-def: $sgpr28
+; GFX11-NEXT: ; implicit-def: $sgpr58
; GFX11-NEXT: ; implicit-def: $sgpr56
; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr44
+; GFX11-NEXT: ; implicit-def: $sgpr40
+; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: s_branch .LBB97_2
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB97_2
; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v23, s0 :: v_dual_mov_b32 v24, s1
; GFX11-NEXT: v_dual_mov_b32 v19, s2 :: v_dual_mov_b32 v20, s3
@@ -66829,28 +67208,28 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v5, s22 :: v_dual_mov_b32 v6, s23
; GFX11-NEXT: v_dual_mov_b32 v3, s24 :: v_dual_mov_b32 v4, s25
; GFX11-NEXT: v_dual_mov_b32 v1, s26 :: v_dual_mov_b32 v2, s27
-; GFX11-NEXT: v_dual_mov_b32 v96, s49 :: v_dual_mov_b32 v87, s39
-; GFX11-NEXT: v_dual_mov_b32 v86, s48 :: v_dual_mov_b32 v85, s38
-; GFX11-NEXT: v_dual_mov_b32 v84, s37 :: v_dual_mov_b32 v83, s36
-; GFX11-NEXT: v_dual_mov_b32 v82, s34 :: v_dual_mov_b32 v81, s35
-; GFX11-NEXT: v_dual_mov_b32 v80, s31 :: v_dual_mov_b32 v71, s30
-; GFX11-NEXT: v_dual_mov_b32 v70, vcc_hi :: v_dual_mov_b32 v69, s94
-; GFX11-NEXT: v_dual_mov_b32 v68, s95 :: v_dual_mov_b32 v67, s93
-; GFX11-NEXT: v_dual_mov_b32 v66, s92 :: v_dual_mov_b32 v65, s91
-; GFX11-NEXT: v_dual_mov_b32 v64, s89 :: v_dual_mov_b32 v55, s90
-; GFX11-NEXT: v_dual_mov_b32 v54, s88 :: v_dual_mov_b32 v53, s79
-; GFX11-NEXT: v_dual_mov_b32 v52, s78 :: v_dual_mov_b32 v51, s76
-; GFX11-NEXT: v_dual_mov_b32 v50, s77 :: v_dual_mov_b32 v49, s75
-; GFX11-NEXT: v_dual_mov_b32 v48, s74 :: v_dual_mov_b32 v39, s73
-; GFX11-NEXT: v_dual_mov_b32 v38, s63 :: v_dual_mov_b32 v37, s72
-; GFX11-NEXT: v_dual_mov_b32 v36, s62 :: v_dual_mov_b32 v35, s61
-; GFX11-NEXT: v_dual_mov_b32 v34, s60 :: v_dual_mov_b32 v33, s58
-; GFX11-NEXT: v_dual_mov_b32 v32, s59 :: v_dual_mov_b32 v31, s57
-; GFX11-NEXT: v_dual_mov_b32 v30, s56 :: v_dual_mov_b32 v29, s47
-; GFX11-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v7, s40
-; GFX11-NEXT: v_dual_mov_b32 v18, s46 :: v_dual_mov_b32 v11, s28
-; GFX11-NEXT: v_dual_mov_b32 v12, s44 :: v_dual_mov_b32 v17, s14
-; GFX11-NEXT: v_dual_mov_b32 v8, s43 :: v_dual_mov_b32 v21, s12
+; GFX11-NEXT: v_dual_mov_b32 v96, s48 :: v_dual_mov_b32 v87, s38
+; GFX11-NEXT: v_dual_mov_b32 v86, s39 :: v_dual_mov_b32 v85, s37
+; GFX11-NEXT: v_dual_mov_b32 v84, s36 :: v_dual_mov_b32 v83, s35
+; GFX11-NEXT: v_dual_mov_b32 v82, s31 :: v_dual_mov_b32 v81, s34
+; GFX11-NEXT: v_dual_mov_b32 v80, s30 :: v_dual_mov_b32 v71, vcc_hi
+; GFX11-NEXT: v_dual_mov_b32 v70, s95 :: v_dual_mov_b32 v69, s93
+; GFX11-NEXT: v_dual_mov_b32 v68, s94 :: v_dual_mov_b32 v67, s92
+; GFX11-NEXT: v_dual_mov_b32 v66, s91 :: v_dual_mov_b32 v65, s90
+; GFX11-NEXT: v_dual_mov_b32 v64, s88 :: v_dual_mov_b32 v55, s89
+; GFX11-NEXT: v_dual_mov_b32 v54, s79 :: v_dual_mov_b32 v53, s78
+; GFX11-NEXT: v_dual_mov_b32 v52, s77 :: v_dual_mov_b32 v51, s75
+; GFX11-NEXT: v_dual_mov_b32 v50, s76 :: v_dual_mov_b32 v49, s74
+; GFX11-NEXT: v_dual_mov_b32 v48, s73 :: v_dual_mov_b32 v39, s72
+; GFX11-NEXT: v_dual_mov_b32 v38, s62 :: v_dual_mov_b32 v37, s63
+; GFX11-NEXT: v_dual_mov_b32 v36, s61 :: v_dual_mov_b32 v35, s60
+; GFX11-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s57
+; GFX11-NEXT: v_dual_mov_b32 v32, s58 :: v_dual_mov_b32 v31, s56
+; GFX11-NEXT: v_dual_mov_b32 v30, s47 :: v_dual_mov_b32 v29, s46
+; GFX11-NEXT: v_dual_mov_b32 v22, s44 :: v_dual_mov_b32 v7, s40
+; GFX11-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v11, s28
+; GFX11-NEXT: v_dual_mov_b32 v12, s43 :: v_dual_mov_b32 v17, s14
+; GFX11-NEXT: v_dual_mov_b32 v8, s42 :: v_dual_mov_b32 v21, s12
; GFX11-NEXT: v_dual_mov_b32 v25, s10 :: v_dual_mov_b32 v26, s8
; GFX11-NEXT: v_dual_mov_b32 v27, s6 :: v_dual_mov_b32 v28, s4
; GFX11-NEXT: .LBB97_5: ; %end
@@ -67004,7 +67383,6 @@ define inreg <64 x i8> @bitcast_v32i16_to_v64i8_scalar(<32 x i16> inreg %a, i32
; GFX11-NEXT: scratch_store_b128 v0, v[23:26], off offset:16
; GFX11-NEXT: scratch_store_b128 v0, v[13:16], off offset:32
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
-; GFX11-NEXT: v_readlane_b32 s49, v40, 9
; GFX11-NEXT: v_readlane_b32 s48, v40, 8
; GFX11-NEXT: v_readlane_b32 s39, v40, 7
; GFX11-NEXT: v_readlane_b32 s38, v40, 6
@@ -69559,8 +69937,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56
; SI-NEXT: s_waitcnt expcnt(1)
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:52
+; SI-NEXT: buffer_load_dword v56, off, s[0:3], s32 offset:52
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:68
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_readfirstlane_b32 s15, v27
; SI-NEXT: v_readfirstlane_b32 s40, v26
; SI-NEXT: v_readfirstlane_b32 s12, v19
@@ -69571,8 +69950,8 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_readfirstlane_b32 s9, v2
; SI-NEXT: v_readfirstlane_b32 s7, v1
; SI-NEXT: v_readfirstlane_b32 s6, v0
-; SI-NEXT: v_lshlrev_b32_e32 v0, 8, v7
-; SI-NEXT: v_lshlrev_b32_e32 v2, 24, v9
+; SI-NEXT: v_lshlrev_b32_e32 v2, 8, v7
+; SI-NEXT: v_lshlrev_b32_e32 v0, 24, v9
; SI-NEXT: v_lshlrev_b32_e32 v3, 24, v5
; SI-NEXT: v_lshlrev_b32_e32 v49, 8, v15
; SI-NEXT: v_lshlrev_b32_e32 v7, 24, v17
@@ -69590,9 +69969,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v43, 8, v36
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v37
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[46:47], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_lshlrev_b32_e32 v56, 8, v38
+; SI-NEXT: v_lshlrev_b32_e32 v57, 8, v38
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_lshlrev_b32_e32 v58, 24, v39
; SI-NEXT: s_waitcnt vmcnt(7)
@@ -69639,11 +70018,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_or_b32 s47, s47, s4
; SI-NEXT: s_and_b32 s4, s24, 0xff
; SI-NEXT: s_lshl_b32 s56, s25, 8
-; SI-NEXT: v_or_b32_e32 v9, v9, v0
+; SI-NEXT: v_or_b32_e32 v9, v9, v2
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: s_or_b32 s4, s4, s56
; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9
-; SI-NEXT: v_or_b32_e32 v11, v2, v10
+; SI-NEXT: v_or_b32_e32 v11, v0, v10
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_mov_b32_e32 v5, s46
; SI-NEXT: v_or_b32_e32 v10, v9, v11
@@ -69712,13 +70091,13 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_lshl_b32 s56, s15, 8
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_or_b32_e32 v48, v32, v63
-; SI-NEXT: v_and_b32_e32 v32, 0xff, v57
+; SI-NEXT: v_and_b32_e32 v32, 0xff, v56
; SI-NEXT: s_or_b32 s4, s4, s56
; SI-NEXT: v_or_b32_e32 v29, v44, v21
; SI-NEXT: v_and_b32_e32 v26, 0xff, v40
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v32
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v26, v26, v56
+; SI-NEXT: v_or_b32_e32 v26, v26, v57
; SI-NEXT: v_or_b32_e32 v34, v61, v32
; SI-NEXT: v_or_b32_e32 v32, s4, v29
; SI-NEXT: s_and_b32 s4, s43, 0xff
@@ -69757,7 +70136,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_and_b32 s4, s45, 0xff
; SI-NEXT: s_lshl_b32 s5, s44, 8
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v57
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v56
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: s_addk_i32 s4, 0x300
@@ -69793,7 +70172,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v40
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_add_i32_e32 v5, vcc, 3, v55
-; SI-NEXT: v_or_b32_e32 v1, v56, v1
+; SI-NEXT: v_or_b32_e32 v1, v57, v1
; SI-NEXT: v_and_b32_e32 v5, 0xff, v5
; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
@@ -69923,25 +70302,25 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_add_i32 s20, s20, 3
-; SI-NEXT: v_or_b32_e32 v0, v0, v1
-; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v8
+; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v8
; SI-NEXT: s_add_i32 s47, s4, 0x3000000
; SI-NEXT: s_and_b32 s4, s20, 0xff
; SI-NEXT: s_lshl_b32 s5, s21, 8
; SI-NEXT: s_add_i32 s22, s22, 3
-; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
+; SI-NEXT: v_and_b32_e32 v2, 0xff, v2
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: s_and_b32 s6, s22, 0xff
-; SI-NEXT: v_add_i32_e32 v0, vcc, 0x300, v0
-; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
+; SI-NEXT: v_add_i32_e32 v1, vcc, 0x300, v1
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: s_lshl_b32 s5, s23, 24
; SI-NEXT: s_lshl_b32 s6, s6, 16
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v1, v2, v1
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
+; SI-NEXT: v_or_b32_e32 v0, v0, v2
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: s_or_b32 s5, s5, s6
-; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_add_i32_e32 v10, vcc, 0x3000000, v0
; SI-NEXT: s_add_i32 s41, s4, 0x3000000
@@ -70031,7 +70410,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB99_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB99_2
+; SI-NEXT: s_branch .LBB99_3
;
; VI-LABEL: bitcast_v64i8_to_v32i16_scalar:
; VI: ; %bb.0:
@@ -70060,11 +70441,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:16
; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:24
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:32
; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:28
; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40
@@ -70074,12 +70455,12 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56
; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72
; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:68
-; VI-NEXT: v_mov_b32_e32 v51, v23
+; VI-NEXT: v_mov_b32_e32 v52, v23
; VI-NEXT: v_mov_b32_e32 v30, v26
-; VI-NEXT: v_mov_b32_e32 v26, v22
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5
@@ -70090,20 +70471,20 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v15
; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v17
; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v19
-; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v21
-; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v51
+; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v21
+; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v52
; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v25
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v29
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v31
; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v33
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v35
; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v37
; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v20
@@ -70112,11 +70493,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: s_waitcnt vmcnt(9)
-; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v39
+; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v39
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v48
+; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v48
; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v49
+; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v49
; VI-NEXT: s_cbranch_scc0 .LBB99_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_or_b32_sdwa v0, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -70126,27 +70507,27 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v1, v34, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v35, v6
; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v53, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v16, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v52, v3
; VI-NEXT: v_mov_b32_e32 v49, v7
; VI-NEXT: v_or_b32_sdwa v3, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v18, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v55, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v55, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v37, v8
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v26, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v22, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v24, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v40, v9
+; VI-NEXT: v_mov_b32_e32 v41, v9
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v30, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v28, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v31, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v51, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v17, v11
; VI-NEXT: v_mov_b32_e32 v19, v13
; VI-NEXT: s_and_b32 s4, s28, 0xff
@@ -70162,23 +70543,22 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: v_mov_b32_e32 v25, v23
-; VI-NEXT: v_mov_b32_e32 v48, v51
-; VI-NEXT: v_mov_b32_e32 v23, v26
-; VI-NEXT: v_mov_b32_e32 v26, v30
+; VI-NEXT: v_mov_b32_e32 v48, v35
+; VI-NEXT: v_mov_b32_e32 v23, v30
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v34, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v54, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v41, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v42, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v43, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v45, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v47, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v57, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v46, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v57, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v32, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -70219,12 +70599,12 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
; VI-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x300, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v41
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40
; VI-NEXT: v_or_b32_sdwa v12, v62, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54
; VI-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v51
; VI-NEXT: v_or_b32_sdwa v11, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30
@@ -70232,18 +70612,18 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28
; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45
; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v2, v26, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v24
; VI-NEXT: v_or_b32_sdwa v9, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v22
; VI-NEXT: v_or_b32_sdwa v3, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55
-; VI-NEXT: v_or_b32_sdwa v8, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v8, v47, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v18
; VI-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x300, v3
@@ -70255,15 +70635,11 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v36
; VI-NEXT: v_or_b32_sdwa v6, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31
-; VI-NEXT: v_or_b32_sdwa v3, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v3, v41, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37
; VI-NEXT: v_or_b32_sdwa v5, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35
-; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
@@ -70292,13 +70668,12 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s10, s16, 0xff
; VI-NEXT: s_lshl_b32 s11, s17, 8
; VI-NEXT: s_or_b32 s10, s11, s10
-; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v46
; VI-NEXT: s_addk_i32 s6, 0x300
; VI-NEXT: s_addk_i32 s8, 0x300
; VI-NEXT: s_addk_i32 s10, 0x300
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
; VI-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -70306,8 +70681,8 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s10, s10, 0xffff
; VI-NEXT: s_and_b32 s8, s8, 0xffff
; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
-; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s9, s9, s10
; VI-NEXT: s_or_b32 s7, s7, s8
; VI-NEXT: s_or_b32 s5, s5, s6
@@ -70315,7 +70690,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_add_i32 s9, s9, 0x3000000
; VI-NEXT: s_add_i32 s7, s7, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
-; VI-NEXT: v_or_b32_sdwa v5, v5, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v6, v6, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v7, v7, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v8, v8, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -70326,25 +70700,31 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v13, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v5, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v2
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0
; VI-NEXT: v_mov_b32_e32 v0, s9
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s5
+; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v4, v52, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -70376,23 +70756,24 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB99_4:
; VI-NEXT: v_mov_b32_e32 v25, v23
-; VI-NEXT: v_mov_b32_e32 v23, v26
-; VI-NEXT: v_mov_b32_e32 v26, v30
+; VI-NEXT: v_mov_b32_e32 v23, v30
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v48, v51
+; VI-NEXT: v_mov_b32_e32 v48, v35
; VI-NEXT: v_mov_b32_e32 v31, v10
; VI-NEXT: v_mov_b32_e32 v36, v34
-; VI-NEXT: v_mov_b32_e32 v35, v6
; VI-NEXT: v_mov_b32_e32 v37, v8
; VI-NEXT: v_mov_b32_e32 v39, v14
; VI-NEXT: v_mov_b32_e32 v21, v15
; VI-NEXT: v_mov_b32_e32 v19, v13
; VI-NEXT: v_mov_b32_e32 v17, v11
-; VI-NEXT: v_mov_b32_e32 v40, v9
+; VI-NEXT: v_mov_b32_e32 v41, v9
; VI-NEXT: v_mov_b32_e32 v49, v7
; VI-NEXT: v_mov_b32_e32 v20, v5
+; VI-NEXT: v_mov_b32_e32 v52, v3
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB99_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB99_2
+; VI-NEXT: s_branch .LBB99_3
;
; GFX9-LABEL: bitcast_v64i8_to_v32i16_scalar:
; GFX9: ; %bb.0:
@@ -70419,9 +70800,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:8
; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:32
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:28
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:40
@@ -70440,6 +70821,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v9
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v13
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v17
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
@@ -70456,7 +70838,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v32
; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v38
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v30
; GFX9-NEXT: s_waitcnt vmcnt(13)
@@ -70511,14 +70893,13 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v0, v18, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v32, v16
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: v_mov_b32_e32 v16, v22
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v37, v24
; GFX9-NEXT: v_or_b32_sdwa v1, v24, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v27, v25
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_mov_b32_e32 v17, v9
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
@@ -70535,12 +70916,12 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
-; GFX9-NEXT: v_mov_b32_e32 v55, v11
+; GFX9-NEXT: v_mov_b32_e32 v41, v11
; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v41, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v40, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v55, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s21, 8
@@ -70571,25 +70952,26 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v2
; GFX9-NEXT: v_mov_b32_e32 v42, v15
-; GFX9-NEXT: v_mov_b32_e32 v27, v25
+; GFX9-NEXT: v_mov_b32_e32 v32, v16
; GFX9-NEXT: v_mov_b32_e32 v30, v18
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v49, v20
+; GFX9-NEXT: v_mov_b32_e32 v16, v22
; GFX9-NEXT: v_mov_b32_e32 v39, v26
; GFX9-NEXT: v_mov_b32_e32 v35, v28
; GFX9-NEXT: v_mov_b32_e32 v54, v31
; GFX9-NEXT: v_mov_b32_e32 v31, v51
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v57, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_mov_b32_e32 v18, v22
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v56, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v56, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_mov_b32_e32 v20, v24
+; GFX9-NEXT: v_mov_b32_e32 v18, v24
+; GFX9-NEXT: v_mov_b32_e32 v20, v25
; GFX9-NEXT: s_cbranch_execnz .LBB99_3
; GFX9-NEXT: .LBB99_2: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v3, 3, v45
@@ -70601,10 +70983,10 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_add_u32_e32 v3, 3, v43
; GFX9-NEXT: v_or_b32_sdwa v3, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v15, 0x300, v3
-; GFX9-NEXT: v_add_u32_e32 v3, 3, v41
+; GFX9-NEXT: v_add_u32_e32 v3, 3, v40
; GFX9-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v12, 0x300, v3
-; GFX9-NEXT: v_add_u32_e32 v3, 3, v40
+; GFX9-NEXT: v_add_u32_e32 v3, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v34
@@ -70638,7 +71020,7 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v48
-; GFX9-NEXT: v_or_b32_sdwa v3, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v3, v41, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v33
; GFX9-NEXT: v_or_b32_sdwa v3, v17, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -70792,11 +71174,13 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v42, v15
; GFX9-NEXT: v_mov_b32_e32 v19, v13
-; GFX9-NEXT: v_mov_b32_e32 v55, v11
+; GFX9-NEXT: v_mov_b32_e32 v41, v11
; GFX9-NEXT: v_mov_b32_e32 v17, v9
; GFX9-NEXT: v_mov_b32_e32 v50, v3
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB99_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB99_2
+; GFX9-NEXT: s_branch .LBB99_3
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v32i16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -70837,7 +71221,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 8, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
@@ -70856,45 +71239,46 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v81, 8, v14
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 8, v84
-; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB99_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s11
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
@@ -70962,10 +71346,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB99_3
; GFX11-TRUE16-NEXT: .LBB99_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
@@ -71141,7 +71524,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB99_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB99_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB99_2
+; GFX11-TRUE16-NEXT: s_branch .LBB99_3
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v32i16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -71182,7 +71567,6 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v25, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v85, 8, v0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(13)
@@ -71201,45 +71585,46 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v81, 8, v14
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v84, 8, v84
-; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB99_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-FAKE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v1, 0xffff, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s12
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s11
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v31
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v37
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v33
@@ -71307,10 +71692,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB99_3
; GFX11-FAKE16-NEXT: .LBB99_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
@@ -71486,7 +71870,9 @@ define inreg <32 x i16> @bitcast_v64i8_to_v32i16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB99_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB99_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB99_2
+; GFX11-FAKE16-NEXT: s_branch .LBB99_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -71981,14 +72367,14 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v32, s16
; SI-NEXT: v_cvt_f16_f32_e32 v33, s17
-; SI-NEXT: v_cvt_f16_f32_e32 v39, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v41, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v50, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v40, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v42, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v44, v7
; SI-NEXT: v_cvt_f16_f32_e32 v46, v8
; SI-NEXT: v_cvt_f16_f32_e32 v47, v9
; SI-NEXT: v_cvt_f16_f32_e32 v56, v10
@@ -72010,16 +72396,17 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: v_cvt_f16_f32_e32 v35, s19
; SI-NEXT: v_cvt_f16_f32_e32 v36, s20
; SI-NEXT: v_cvt_f16_f32_e32 v37, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v38, s22
-; SI-NEXT: v_cvt_f16_f32_e32 v48, s23
-; SI-NEXT: v_cvt_f16_f32_e32 v50, s24
-; SI-NEXT: v_cvt_f16_f32_e32 v52, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v54, s26
-; SI-NEXT: v_cvt_f16_f32_e32 v40, s27
-; SI-NEXT: v_cvt_f16_f32_e32 v42, s28
-; SI-NEXT: v_cvt_f16_f32_e32 v44, s29
+; SI-NEXT: v_cvt_f16_f32_e32 v39, s22
+; SI-NEXT: v_cvt_f16_f32_e32 v49, s23
+; SI-NEXT: v_cvt_f16_f32_e32 v51, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v53, s25
+; SI-NEXT: v_cvt_f16_f32_e32 v55, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v41, s27
+; SI-NEXT: v_cvt_f16_f32_e32 v43, s28
+; SI-NEXT: v_cvt_f16_f32_e32 v45, s29
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB101_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v32
@@ -72028,22 +72415,22 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v35
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v36
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v37
-; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v38
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v48
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v50
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v52
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v54
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v40
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v42
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v44
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v39
-; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v49
-; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v51
-; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v53
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v55
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v41
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v43
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v45
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v39
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v49
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v51
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v53
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v55
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v41
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v43
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v45
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v50
+; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v52
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v54
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v40
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v42
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v44
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v46
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v47
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v56
@@ -72074,38 +72461,38 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v3
; SI-NEXT: v_cvt_f32_f16_e32 v0, v47
; SI-NEXT: v_cvt_f32_f16_e32 v1, v46
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v45
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v44
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v42
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v1
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v2
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v41
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v51
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v54
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v50
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v1
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v2
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v49
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v39
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v44
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v45
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v43
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v1
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v2
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v52
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v50
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v51
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v1
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v2
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v48
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v49
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v39
; SI-NEXT: v_cvt_f32_f16_e32 v2, v37
; SI-NEXT: v_cvt_f32_f16_e32 v3, v36
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v0
@@ -72236,13 +72623,16 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB101_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB101_2
+; SI-NEXT: s_branch .LBB101_3
;
; VI-LABEL: bitcast_v32f16_to_v32bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v2
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v16, v2
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
@@ -72258,12 +72648,15 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB101_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_3
-; VI-NEXT: .LBB101_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB101_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB101_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v17, 0x200
; VI-NEXT: v_add_f16_e32 v19, 0x200, v15
; VI-NEXT: v_add_f16_sdwa v15, v15, v17 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:WORD_1 src1_sel:DWORD
@@ -72313,16 +72706,15 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; VI-NEXT: v_or_b32_e32 v2, v19, v2
; VI-NEXT: v_or_b32_e32 v1, v18, v1
; VI-NEXT: v_or_b32_e32 v0, v16, v0
-; VI-NEXT: .LBB101_3: ; %end
+; VI-NEXT: .LBB101_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB101_4:
-; VI-NEXT: s_branch .LBB101_2
;
; GFX9-LABEL: bitcast_v32f16_to_v32bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v2
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v16, v2
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -72338,12 +72730,15 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_3
-; GFX9-NEXT: .LBB101_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB101_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_pk_add_f16 v15, v15, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v14, v14, s4 op_sel_hi:[1,0]
@@ -72361,10 +72756,8 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX9-NEXT: v_pk_add_f16 v2, v2, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, v1, s4 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, v0, s4 op_sel_hi:[1,0]
-; GFX9-NEXT: .LBB101_3: ; %end
+; GFX9-NEXT: .LBB101_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB101_4:
-; GFX9-NEXT: s_branch .LBB101_2
;
; GFX11-LABEL: bitcast_v32f16_to_v32bf16_scalar:
; GFX11: ; %bb.0:
@@ -72374,12 +72767,15 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB101_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
-; GFX11-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s27 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s26 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s25 op_sel_hi:[0,1]
@@ -72397,8 +72793,6 @@ define inreg <32 x bfloat> @bitcast_v32f16_to_v32bf16_scalar(<32 x half> inreg %
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s13 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -73892,6 +74286,7 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -73908,7 +74303,6 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mul_f32_e64 v32, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v33, 1.0, s17
; SI-NEXT: v_mul_f32_e32 v39, 1.0, v0
@@ -73936,6 +74330,7 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; SI-NEXT: v_mul_f32_e32 v62, 1.0, v16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mul_f32_e32 v63, 1.0, v17
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mul_f32_e64 v34, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v35, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v36, 1.0, s20
@@ -74196,7 +74591,9 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB103_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB103_2
+; SI-NEXT: s_branch .LBB103_3
;
; VI-LABEL: bitcast_v32bf16_to_v32f16_scalar:
; VI: ; %bb.0:
@@ -74204,16 +74601,20 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; VI-NEXT: s_xor_saveexec_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
-; VI-NEXT: v_writelane_b32 v20, s30, 0
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; VI-NEXT: v_writelane_b32 v20, s30, 0
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v20, s31, 1
; VI-NEXT: v_readfirstlane_b32 s30, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s31, v1
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
-; VI-NEXT: .LBB103_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB103_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB103_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, 0x40c00000
; VI-NEXT: v_add_f32_e32 v0, s4, v1
@@ -74504,8 +74905,6 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; VI-NEXT: v_alignbit_b32 v1, v1, v17, 16
; VI-NEXT: v_alignbit_b32 v0, v16, v0, 16
; VI-NEXT: s_branch .LBB103_5
-; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -74538,16 +74937,20 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: s_xor_saveexec_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v20, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
-; GFX9-NEXT: v_writelane_b32 v20, s30, 0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v2
+; GFX9-NEXT: v_writelane_b32 v20, s30, 0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_writelane_b32 v20, s31, 1
; GFX9-NEXT: v_readfirstlane_b32 s30, v0
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s31, v1
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
-; GFX9-NEXT: .LBB103_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB103_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: s_and_b32 s5, s30, 0xffff0000
; GFX9-NEXT: v_add_f32_e32 v1, s5, v0
@@ -74855,8 +75258,6 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX9-NEXT: v_and_b32_sdwa v16, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v0, 16, v16
; GFX9-NEXT: s_branch .LBB103_5
-; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -74891,12 +75292,15 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX11-NEXT: s_mov_b32 s13, s1
; GFX11-NEXT: s_mov_b32 s12, s0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB103_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB103_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_and_b32 s0, s12, 0xffff0000
; GFX11-NEXT: s_lshl_b32 s1, s12, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s0
@@ -75223,8 +75627,6 @@ define inreg <32 x half> @bitcast_v32bf16_to_v32f16_scalar(<32 x bfloat> inreg %
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_4)
; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v23
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
; GFX11-NEXT: .LBB103_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
; GFX11-NEXT: v_dual_mov_b32 v2, s14 :: v_dual_mov_b32 v3, s15
@@ -77344,22 +77746,22 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v22, s17
+; SI-NEXT: v_cvt_f16_f32_e32 v21, s17
; SI-NEXT: v_cvt_f16_f32_e32 v20, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v27, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v26, v4
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v3
; SI-NEXT: v_cvt_f16_f32_e32 v4, v6
-; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v5
; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
; SI-NEXT: v_cvt_f16_f32_e32 v34, v7
; SI-NEXT: v_cvt_f16_f32_e32 v3, v10
; SI-NEXT: v_cvt_f16_f32_e32 v30, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v52, v12
; SI-NEXT: v_cvt_f16_f32_e32 v50, v11
; SI-NEXT: v_cvt_f16_f32_e32 v2, v14
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v13
; SI-NEXT: v_cvt_f16_f32_e32 v41, v16
; SI-NEXT: v_cvt_f16_f32_e32 v40, v15
; SI-NEXT: v_cvt_f16_f32_e32 v1, v18
@@ -77375,21 +77777,22 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v42, s24
; SI-NEXT: v_cvt_f16_f32_e32 v5, s27
; SI-NEXT: v_cvt_f16_f32_e32 v14, s26
-; SI-NEXT: v_cvt_f16_f32_e32 v26, s29
+; SI-NEXT: v_cvt_f16_f32_e32 v27, s29
; SI-NEXT: v_cvt_f16_f32_e32 v25, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v28, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB105_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v22
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v21
; SI-NEXT: v_or_b32_e32 v37, v10, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v7
; SI-NEXT: v_or_b32_e32 v32, v9, v8
@@ -77428,10 +77831,10 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_alignbit_b32 v8, v19, v18, 8
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:92 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v26
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v27
; SI-NEXT: v_or_b32_e32 v16, v25, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v20
-; SI-NEXT: v_or_b32_e32 v17, v28, v8
+; SI-NEXT: v_or_b32_e32 v17, v22, v8
; SI-NEXT: v_alignbit_b32 v8, v17, v16, 24
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -77441,49 +77844,50 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_alignbit_b32 v8, v17, v16, 8
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:84 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v27
-; SI-NEXT: v_or_b32_e32 v15, v21, v8
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v26
+; SI-NEXT: v_or_b32_e32 v15, v63, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v4
-; SI-NEXT: v_or_b32_e32 v14, v62, v8
+; SI-NEXT: v_or_b32_e32 v14, v61, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v35
; SI-NEXT: v_or_b32_e32 v12, v34, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v3
; SI-NEXT: v_or_b32_e32 v13, v30, v8
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v53
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v52
; SI-NEXT: v_or_b32_e32 v10, v50, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v2
-; SI-NEXT: v_or_b32_e32 v11, v48, v8
+; SI-NEXT: v_or_b32_e32 v11, v39, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41
; SI-NEXT: v_or_b32_e32 v9, v40, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v1
-; SI-NEXT: v_alignbit_b32 v22, v11, v10, 24
+; SI-NEXT: v_alignbit_b32 v21, v11, v10, 24
; SI-NEXT: v_or_b32_e32 v8, v55, v8
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v22, v11, v10, 16
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v21, v11, v10, 16
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v22, v8, v9, 24
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v21, v8, v9, 24
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v22, v8, v9, 16
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
+; SI-NEXT: v_alignbit_b32 v21, v8, v9, 16
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_alignbit_b32 v22, v8, v9, 8
+; SI-NEXT: v_alignbit_b32 v21, v8, v9, 8
; SI-NEXT: v_alignbit_b32 v57, v14, v15, 24
; SI-NEXT: v_alignbit_b32 v58, v14, v15, 16
-; SI-NEXT: v_alignbit_b32 v61, v14, v15, 8
+; SI-NEXT: v_alignbit_b32 v62, v14, v15, 8
; SI-NEXT: v_alignbit_b32 v44, v13, v12, 24
; SI-NEXT: v_alignbit_b32 v47, v13, v12, 16
; SI-NEXT: v_alignbit_b32 v56, v13, v12, 8
; SI-NEXT: v_alignbit_b32 v43, v11, v10, 8
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v32
-; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v23
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
+; SI-NEXT: v_lshrrev_b32_e32 v53, 8, v32
+; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v23
; SI-NEXT: v_lshrrev_b32_e32 v36, 8, v19
; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v17
; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v14
-; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v13
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v13
; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v11
; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v8
; SI-NEXT: v_bfe_u32 v54, v7, 8, 8
@@ -77492,7 +77896,6 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_bfe_u32 v38, v20, 8, 8
; SI-NEXT: v_bfe_u32 v33, v4, 8, 8
; SI-NEXT: v_bfe_u32 v29, v3, 8, 8
-; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_bfe_u32 v22, v2, 8, 8
; SI-NEXT: v_bfe_u32 v60, v1, 8, 8
; SI-NEXT: s_cbranch_execnz .LBB105_3
@@ -77519,13 +77922,13 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v9, v9, v8
; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v52
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v1
; SI-NEXT: v_or_b32_e32 v8, v10, v8
; SI-NEXT: v_cvt_f32_f16_e32 v10, v50
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v39
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v35
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
@@ -77553,14 +77956,14 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v21
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v63
; SI-NEXT: v_or_b32_e32 v12, v14, v12
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v27
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v26
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v3
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_or_b32_e32 v13, v13, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v16
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v61
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
@@ -77570,7 +77973,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v15, v15, v14
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v26
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v27
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v4
; SI-NEXT: v_or_b32_e32 v14, v16, v14
; SI-NEXT: v_cvt_f32_f16_e32 v16, v25
@@ -77610,14 +78013,13 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_alignbit_b32 v57, v14, v15, 24
; SI-NEXT: v_alignbit_b32 v58, v14, v15, 16
-; SI-NEXT: v_alignbit_b32 v61, v14, v15, 8
+; SI-NEXT: v_alignbit_b32 v62, v14, v15, 8
; SI-NEXT: v_alignbit_b32 v44, v13, v12, 24
; SI-NEXT: v_alignbit_b32 v47, v13, v12, 16
; SI-NEXT: v_alignbit_b32 v56, v13, v12, 8
; SI-NEXT: v_alignbit_b32 v43, v11, v10, 8
; SI-NEXT: v_lshrrev_b32_e32 v31, 8, v17
; SI-NEXT: v_lshrrev_b32_e32 v28, 8, v14
-; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v13
; SI-NEXT: v_lshrrev_b32_e32 v59, 8, v11
; SI-NEXT: v_lshrrev_b32_e32 v45, 8, v8
; SI-NEXT: v_bfe_u32 v54, v7, 8, 8
@@ -77655,7 +78057,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
; SI-NEXT: v_or_b32_e32 v23, v23, v22
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: v_lshrrev_b32_e32 v39, 8, v23
+; SI-NEXT: v_lshrrev_b32_e32 v48, 8, v23
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
@@ -77717,13 +78119,14 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v21, v8, v9, 8
-; SI-NEXT: v_lshrrev_b32_e32 v52, 8, v32
-; SI-NEXT: v_bfe_u32 v22, v2, 8, 8
; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; SI-NEXT: .LBB105_3: ; %end
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; SI-NEXT: v_lshrrev_b32_e32 v53, 8, v32
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v21, 0xff, v37
+; SI-NEXT: v_lshrrev_b32_e32 v21, 8, v13
+; SI-NEXT: v_bfe_u32 v22, v2, 8, 8
+; SI-NEXT: .LBB105_3: ; %end
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v25, 0xff, v37
; SI-NEXT: v_and_b32_e32 v7, 0xff, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_and_b32_e32 v6, 0xff, v6
@@ -77738,55 +78141,55 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_and_b32_e32 v1, 0xff, v1
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v25
-; SI-NEXT: v_or_b32_e32 v21, v21, v25
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
-; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v26
+; SI-NEXT: v_or_b32_e32 v25, v25, v26
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
+; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v26
+; SI-NEXT: v_lshlrev_b32_e32 v27, 24, v27
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v25, 0xff, v25
-; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
-; SI-NEXT: v_or_b32_e32 v25, v26, v25
-; SI-NEXT: v_or_b32_e32 v21, v21, v25
-; SI-NEXT: buffer_store_dword v21, v0, s[0:3], 0 offen
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v21, 0xff, v32
-; SI-NEXT: v_lshlrev_b32_e32 v25, 8, v52
-; SI-NEXT: v_or_b32_e32 v21, v21, v25
-; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v54
-; SI-NEXT: v_and_b32_e32 v21, 0xffff, v21
+; SI-NEXT: v_and_b32_e32 v26, 0xff, v26
+; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
+; SI-NEXT: v_or_b32_e32 v26, v27, v26
+; SI-NEXT: v_or_b32_e32 v25, v25, v26
+; SI-NEXT: buffer_store_dword v25, v0, s[0:3], 0 offen
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_and_b32_e32 v25, 0xff, v32
+; SI-NEXT: v_lshlrev_b32_e32 v26, 8, v53
+; SI-NEXT: v_or_b32_e32 v25, v25, v26
+; SI-NEXT: v_lshlrev_b32_e32 v26, 24, v54
+; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25
+; SI-NEXT: v_or_b32_e32 v7, v26, v7
; SI-NEXT: v_or_b32_e32 v7, v25, v7
-; SI-NEXT: v_or_b32_e32 v7, v21, v7
-; SI-NEXT: v_add_i32_e32 v21, vcc, 4, v0
-; SI-NEXT: buffer_store_dword v7, v21, s[0:3], 0 offen
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; SI-NEXT: v_add_i32_e32 v25, vcc, 4, v0
+; SI-NEXT: buffer_store_dword v7, v25, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xff, v24
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:100 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v21
-; SI-NEXT: v_or_b32_e32 v7, v7, v21
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v24, 8, v24
+; SI-NEXT: v_or_b32_e32 v7, v7, v24
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:88 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v24, 24, v24
+; SI-NEXT: v_lshlrev_b32_e32 v25, 24, v25
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v21, 0xff, v21
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
-; SI-NEXT: v_or_b32_e32 v21, v24, v21
-; SI-NEXT: v_or_b32_e32 v7, v7, v21
-; SI-NEXT: v_add_i32_e32 v21, vcc, 8, v0
-; SI-NEXT: buffer_store_dword v7, v21, s[0:3], 0 offen
+; SI-NEXT: v_and_b32_e32 v24, 0xff, v24
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; SI-NEXT: v_or_b32_e32 v24, v25, v24
+; SI-NEXT: v_or_b32_e32 v7, v7, v24
+; SI-NEXT: v_add_i32_e32 v24, vcc, 8, v0
+; SI-NEXT: buffer_store_dword v7, v24, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v7, 0xff, v23
-; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v39
-; SI-NEXT: v_or_b32_e32 v7, v7, v21
-; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v51
+; SI-NEXT: v_lshlrev_b32_e32 v23, 8, v48
+; SI-NEXT: v_or_b32_e32 v7, v7, v23
+; SI-NEXT: v_lshlrev_b32_e32 v23, 24, v51
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
-; SI-NEXT: v_or_b32_e32 v6, v21, v6
+; SI-NEXT: v_or_b32_e32 v6, v23, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
; SI-NEXT: v_add_i32_e32 v7, vcc, 12, v0
; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen
@@ -77850,7 +78253,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v5, 0xff, v15
-; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v61
+; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v62
; SI-NEXT: v_or_b32_e32 v5, v5, v6
; SI-NEXT: v_and_b32_e32 v6, 0xff, v58
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
@@ -77884,7 +78287,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: buffer_store_dword v4, v5, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 0xff, v13
-; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v63
+; SI-NEXT: v_lshlrev_b32_e32 v5, 8, v21
; SI-NEXT: v_or_b32_e32 v4, v4, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 24, v29
; SI-NEXT: v_and_b32_e32 v4, 0xffff, v4
@@ -77967,15 +78370,13 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: .LBB105_4:
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; kill: killed $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; kill: killed $vgpr8
; SI-NEXT: ; implicit-def: $vgpr37
; SI-NEXT: ; implicit-def: $vgpr32
-; SI-NEXT: ; implicit-def: $vgpr52
+; SI-NEXT: ; implicit-def: $vgpr53
; SI-NEXT: ; implicit-def: $vgpr54
; SI-NEXT: ; implicit-def: $vgpr24
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: ; implicit-def: $vgpr39
+; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr51
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr19
@@ -77986,7 +78387,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; implicit-def: $vgpr38
; SI-NEXT: ; implicit-def: $vgpr15
-; SI-NEXT: ; implicit-def: $vgpr61
+; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr14
@@ -77997,7 +78398,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr47
; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr63
+; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr43
@@ -78038,7 +78439,11 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; kill: killed $vgpr8
; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: s_branch .LBB105_2
+; SI-NEXT: ; kill: killed $vgpr8
+; SI-NEXT: ; implicit-def: $vgpr8
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB105_2
+; SI-NEXT: s_branch .LBB105_3
;
; VI-LABEL: bitcast_v32f16_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -78068,8 +78473,9 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_writelane_b32 v63, s67, 19
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: s_mov_b64 s[46:47], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -78305,7 +78711,8 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr58
; VI-NEXT: ; implicit-def: $sgpr36
; VI-NEXT: ; implicit-def: $sgpr75
-; VI-NEXT: s_branch .LBB105_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; VI-NEXT: s_cbranch_vccz .LBB105_2
; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v1, s58
; VI-NEXT: v_mov_b32_e32 v53, s56
@@ -78561,8 +78968,9 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; GFX9-NEXT: v_writelane_b32 v63, s55, 15
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[46:47], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -78751,7 +79159,8 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr59
; GFX9-NEXT: ; implicit-def: $sgpr57
; GFX9-NEXT: ; implicit-def: $sgpr56
-; GFX9-NEXT: s_branch .LBB105_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; GFX9-NEXT: s_cbranch_vccz .LBB105_2
; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v21, s44
; GFX9-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -78971,7 +79380,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v40, s30, 0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 s42, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: v_writelane_b32 v40, s31, 1
; GFX11-NEXT: v_writelane_b32 v40, s34, 2
; GFX11-NEXT: v_writelane_b32 v40, s35, 3
@@ -78980,49 +79389,49 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_writelane_b32 v40, s38, 6
; GFX11-NEXT: v_writelane_b32 v40, s39, 7
; GFX11-NEXT: v_writelane_b32 v40, s48, 8
-; GFX11-NEXT: v_writelane_b32 v40, s49, 9
; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s43, s27, 24
-; GFX11-NEXT: s_lshr_b32 s44, s27, 16
-; GFX11-NEXT: s_lshr_b32 s46, s27, 8
-; GFX11-NEXT: s_lshr_b32 s45, s26, 16
-; GFX11-NEXT: s_lshr_b32 s47, s26, 8
-; GFX11-NEXT: s_lshr_b32 s56, s25, 24
-; GFX11-NEXT: s_lshr_b32 s57, s25, 16
-; GFX11-NEXT: s_lshr_b32 s59, s25, 8
-; GFX11-NEXT: s_lshr_b32 s58, s24, 16
-; GFX11-NEXT: s_lshr_b32 s60, s24, 8
-; GFX11-NEXT: s_lshr_b32 s61, s23, 24
-; GFX11-NEXT: s_lshr_b32 s62, s23, 16
-; GFX11-NEXT: s_lshr_b32 s72, s23, 8
-; GFX11-NEXT: s_lshr_b32 s63, s22, 16
-; GFX11-NEXT: s_lshr_b32 s73, s22, 8
-; GFX11-NEXT: s_lshr_b32 s74, s21, 24
-; GFX11-NEXT: s_lshr_b32 s75, s21, 16
-; GFX11-NEXT: s_lshr_b32 s77, s21, 8
-; GFX11-NEXT: s_lshr_b32 s76, s20, 16
-; GFX11-NEXT: s_lshr_b32 s78, s20, 8
-; GFX11-NEXT: s_lshr_b32 s79, s19, 24
-; GFX11-NEXT: s_lshr_b32 s88, s19, 16
-; GFX11-NEXT: s_lshr_b32 s90, s19, 8
-; GFX11-NEXT: s_lshr_b32 s89, s18, 16
-; GFX11-NEXT: s_lshr_b32 s91, s18, 8
-; GFX11-NEXT: s_lshr_b32 s92, s17, 24
-; GFX11-NEXT: s_lshr_b32 s93, s17, 16
-; GFX11-NEXT: s_lshr_b32 s95, s17, 8
-; GFX11-NEXT: s_lshr_b32 s94, s16, 16
-; GFX11-NEXT: s_lshr_b32 vcc_hi, s16, 8
-; GFX11-NEXT: s_lshr_b32 s30, s3, 24
-; GFX11-NEXT: s_lshr_b32 s31, s3, 16
-; GFX11-NEXT: s_lshr_b32 s35, s3, 8
-; GFX11-NEXT: s_lshr_b32 s34, s2, 16
-; GFX11-NEXT: s_lshr_b32 s36, s2, 8
-; GFX11-NEXT: s_lshr_b32 s37, s1, 24
-; GFX11-NEXT: s_lshr_b32 s38, s1, 16
-; GFX11-NEXT: s_lshr_b32 s48, s1, 8
-; GFX11-NEXT: s_lshr_b32 s39, s0, 16
-; GFX11-NEXT: s_lshr_b32 s49, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
+; GFX11-NEXT: s_lshr_b32 s42, s27, 24
+; GFX11-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-NEXT: s_lshr_b32 s45, s27, 8
+; GFX11-NEXT: s_lshr_b32 s44, s26, 16
+; GFX11-NEXT: s_lshr_b32 s46, s26, 8
+; GFX11-NEXT: s_lshr_b32 s47, s25, 24
+; GFX11-NEXT: s_lshr_b32 s56, s25, 16
+; GFX11-NEXT: s_lshr_b32 s58, s25, 8
+; GFX11-NEXT: s_lshr_b32 s57, s24, 16
+; GFX11-NEXT: s_lshr_b32 s59, s24, 8
+; GFX11-NEXT: s_lshr_b32 s60, s23, 24
+; GFX11-NEXT: s_lshr_b32 s61, s23, 16
+; GFX11-NEXT: s_lshr_b32 s63, s23, 8
+; GFX11-NEXT: s_lshr_b32 s62, s22, 16
+; GFX11-NEXT: s_lshr_b32 s72, s22, 8
+; GFX11-NEXT: s_lshr_b32 s73, s21, 24
+; GFX11-NEXT: s_lshr_b32 s74, s21, 16
+; GFX11-NEXT: s_lshr_b32 s76, s21, 8
+; GFX11-NEXT: s_lshr_b32 s75, s20, 16
+; GFX11-NEXT: s_lshr_b32 s77, s20, 8
+; GFX11-NEXT: s_lshr_b32 s78, s19, 24
+; GFX11-NEXT: s_lshr_b32 s79, s19, 16
+; GFX11-NEXT: s_lshr_b32 s89, s19, 8
+; GFX11-NEXT: s_lshr_b32 s88, s18, 16
+; GFX11-NEXT: s_lshr_b32 s90, s18, 8
+; GFX11-NEXT: s_lshr_b32 s91, s17, 24
+; GFX11-NEXT: s_lshr_b32 s92, s17, 16
+; GFX11-NEXT: s_lshr_b32 s94, s17, 8
+; GFX11-NEXT: s_lshr_b32 s93, s16, 16
+; GFX11-NEXT: s_lshr_b32 s95, s16, 8
+; GFX11-NEXT: s_lshr_b32 vcc_hi, s3, 24
+; GFX11-NEXT: s_lshr_b32 s30, s3, 16
+; GFX11-NEXT: s_lshr_b32 s34, s3, 8
+; GFX11-NEXT: s_lshr_b32 s31, s2, 16
+; GFX11-NEXT: s_lshr_b32 s35, s2, 8
+; GFX11-NEXT: s_lshr_b32 s36, s1, 24
+; GFX11-NEXT: s_lshr_b32 s37, s1, 16
+; GFX11-NEXT: s_lshr_b32 s39, s1, 8
+; GFX11-NEXT: s_lshr_b32 s38, s0, 16
+; GFX11-NEXT: s_lshr_b32 s48, s0, 8
; GFX11-NEXT: s_lshr_b64 s[40:41], s[26:27], 24
; GFX11-NEXT: s_lshr_b64 s[28:29], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[14:15], s[22:23], 24
@@ -79030,9 +79439,7 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: s_lshr_b64 s[10:11], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s42
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_cbranch_execnz .LBB105_4
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s19 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s18 op_sel_hi:[0,1]
@@ -79100,55 +79507,56 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v96, 8, v23
; GFX11-NEXT: s_branch .LBB105_5
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr49
-; GFX11-NEXT: ; implicit-def: $sgpr39
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr48
; GFX11-NEXT: ; implicit-def: $sgpr38
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr39
; GFX11-NEXT: ; implicit-def: $sgpr37
; GFX11-NEXT: ; implicit-def: $sgpr36
-; GFX11-NEXT: ; implicit-def: $sgpr34
-; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr35
; GFX11-NEXT: ; implicit-def: $sgpr31
+; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr34
; GFX11-NEXT: ; implicit-def: $sgpr30
; GFX11-NEXT: ; implicit-def: $vcc_hi
-; GFX11-NEXT: ; implicit-def: $sgpr94
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr95
; GFX11-NEXT: ; implicit-def: $sgpr93
+; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr94
; GFX11-NEXT: ; implicit-def: $sgpr92
; GFX11-NEXT: ; implicit-def: $sgpr91
-; GFX11-NEXT: ; implicit-def: $sgpr89
-; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr90
; GFX11-NEXT: ; implicit-def: $sgpr88
+; GFX11-NEXT: ; implicit-def: $sgpr10
+; GFX11-NEXT: ; implicit-def: $sgpr89
; GFX11-NEXT: ; implicit-def: $sgpr79
; GFX11-NEXT: ; implicit-def: $sgpr78
-; GFX11-NEXT: ; implicit-def: $sgpr76
-; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr77
; GFX11-NEXT: ; implicit-def: $sgpr75
+; GFX11-NEXT: ; implicit-def: $sgpr12
+; GFX11-NEXT: ; implicit-def: $sgpr76
; GFX11-NEXT: ; implicit-def: $sgpr74
; GFX11-NEXT: ; implicit-def: $sgpr73
-; GFX11-NEXT: ; implicit-def: $sgpr63
-; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr72
; GFX11-NEXT: ; implicit-def: $sgpr62
+; GFX11-NEXT: ; implicit-def: $sgpr14
+; GFX11-NEXT: ; implicit-def: $sgpr63
; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr60
-; GFX11-NEXT: ; implicit-def: $sgpr58
-; GFX11-NEXT: ; implicit-def: $sgpr28
; GFX11-NEXT: ; implicit-def: $sgpr59
; GFX11-NEXT: ; implicit-def: $sgpr57
+; GFX11-NEXT: ; implicit-def: $sgpr28
+; GFX11-NEXT: ; implicit-def: $sgpr58
; GFX11-NEXT: ; implicit-def: $sgpr56
; GFX11-NEXT: ; implicit-def: $sgpr47
-; GFX11-NEXT: ; implicit-def: $sgpr45
-; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr44
+; GFX11-NEXT: ; implicit-def: $sgpr40
+; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr43
-; GFX11-NEXT: s_branch .LBB105_2
+; GFX11-NEXT: ; implicit-def: $sgpr42
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB105_2
; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v23, s0 :: v_dual_mov_b32 v24, s1
; GFX11-NEXT: v_dual_mov_b32 v19, s2 :: v_dual_mov_b32 v20, s3
@@ -79158,28 +79566,28 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v5, s22 :: v_dual_mov_b32 v6, s23
; GFX11-NEXT: v_dual_mov_b32 v3, s24 :: v_dual_mov_b32 v4, s25
; GFX11-NEXT: v_dual_mov_b32 v1, s26 :: v_dual_mov_b32 v2, s27
-; GFX11-NEXT: v_dual_mov_b32 v96, s49 :: v_dual_mov_b32 v87, s39
-; GFX11-NEXT: v_dual_mov_b32 v86, s48 :: v_dual_mov_b32 v85, s38
-; GFX11-NEXT: v_dual_mov_b32 v84, s37 :: v_dual_mov_b32 v83, s36
-; GFX11-NEXT: v_dual_mov_b32 v82, s34 :: v_dual_mov_b32 v81, s35
-; GFX11-NEXT: v_dual_mov_b32 v80, s31 :: v_dual_mov_b32 v71, s30
-; GFX11-NEXT: v_dual_mov_b32 v70, vcc_hi :: v_dual_mov_b32 v69, s94
-; GFX11-NEXT: v_dual_mov_b32 v68, s95 :: v_dual_mov_b32 v67, s93
-; GFX11-NEXT: v_dual_mov_b32 v66, s92 :: v_dual_mov_b32 v65, s91
-; GFX11-NEXT: v_dual_mov_b32 v64, s89 :: v_dual_mov_b32 v55, s90
-; GFX11-NEXT: v_dual_mov_b32 v54, s88 :: v_dual_mov_b32 v53, s79
-; GFX11-NEXT: v_dual_mov_b32 v52, s78 :: v_dual_mov_b32 v51, s76
-; GFX11-NEXT: v_dual_mov_b32 v50, s77 :: v_dual_mov_b32 v49, s75
-; GFX11-NEXT: v_dual_mov_b32 v48, s74 :: v_dual_mov_b32 v39, s73
-; GFX11-NEXT: v_dual_mov_b32 v38, s63 :: v_dual_mov_b32 v37, s72
-; GFX11-NEXT: v_dual_mov_b32 v36, s62 :: v_dual_mov_b32 v35, s61
-; GFX11-NEXT: v_dual_mov_b32 v34, s60 :: v_dual_mov_b32 v33, s58
-; GFX11-NEXT: v_dual_mov_b32 v32, s59 :: v_dual_mov_b32 v31, s57
-; GFX11-NEXT: v_dual_mov_b32 v30, s56 :: v_dual_mov_b32 v29, s47
-; GFX11-NEXT: v_dual_mov_b32 v22, s45 :: v_dual_mov_b32 v7, s40
-; GFX11-NEXT: v_dual_mov_b32 v18, s46 :: v_dual_mov_b32 v11, s28
-; GFX11-NEXT: v_dual_mov_b32 v12, s44 :: v_dual_mov_b32 v17, s14
-; GFX11-NEXT: v_dual_mov_b32 v8, s43 :: v_dual_mov_b32 v21, s12
+; GFX11-NEXT: v_dual_mov_b32 v96, s48 :: v_dual_mov_b32 v87, s38
+; GFX11-NEXT: v_dual_mov_b32 v86, s39 :: v_dual_mov_b32 v85, s37
+; GFX11-NEXT: v_dual_mov_b32 v84, s36 :: v_dual_mov_b32 v83, s35
+; GFX11-NEXT: v_dual_mov_b32 v82, s31 :: v_dual_mov_b32 v81, s34
+; GFX11-NEXT: v_dual_mov_b32 v80, s30 :: v_dual_mov_b32 v71, vcc_hi
+; GFX11-NEXT: v_dual_mov_b32 v70, s95 :: v_dual_mov_b32 v69, s93
+; GFX11-NEXT: v_dual_mov_b32 v68, s94 :: v_dual_mov_b32 v67, s92
+; GFX11-NEXT: v_dual_mov_b32 v66, s91 :: v_dual_mov_b32 v65, s90
+; GFX11-NEXT: v_dual_mov_b32 v64, s88 :: v_dual_mov_b32 v55, s89
+; GFX11-NEXT: v_dual_mov_b32 v54, s79 :: v_dual_mov_b32 v53, s78
+; GFX11-NEXT: v_dual_mov_b32 v52, s77 :: v_dual_mov_b32 v51, s75
+; GFX11-NEXT: v_dual_mov_b32 v50, s76 :: v_dual_mov_b32 v49, s74
+; GFX11-NEXT: v_dual_mov_b32 v48, s73 :: v_dual_mov_b32 v39, s72
+; GFX11-NEXT: v_dual_mov_b32 v38, s62 :: v_dual_mov_b32 v37, s63
+; GFX11-NEXT: v_dual_mov_b32 v36, s61 :: v_dual_mov_b32 v35, s60
+; GFX11-NEXT: v_dual_mov_b32 v34, s59 :: v_dual_mov_b32 v33, s57
+; GFX11-NEXT: v_dual_mov_b32 v32, s58 :: v_dual_mov_b32 v31, s56
+; GFX11-NEXT: v_dual_mov_b32 v30, s47 :: v_dual_mov_b32 v29, s46
+; GFX11-NEXT: v_dual_mov_b32 v22, s44 :: v_dual_mov_b32 v7, s40
+; GFX11-NEXT: v_dual_mov_b32 v18, s45 :: v_dual_mov_b32 v11, s28
+; GFX11-NEXT: v_dual_mov_b32 v12, s43 :: v_dual_mov_b32 v17, s14
+; GFX11-NEXT: v_dual_mov_b32 v8, s42 :: v_dual_mov_b32 v21, s12
; GFX11-NEXT: v_dual_mov_b32 v25, s10 :: v_dual_mov_b32 v26, s8
; GFX11-NEXT: v_dual_mov_b32 v27, s6 :: v_dual_mov_b32 v28, s4
; GFX11-NEXT: .LBB105_5: ; %end
@@ -79333,7 +79741,6 @@ define inreg <64 x i8> @bitcast_v32f16_to_v64i8_scalar(<32 x half> inreg %a, i32
; GFX11-NEXT: scratch_store_b128 v0, v[23:26], off offset:16
; GFX11-NEXT: scratch_store_b128 v0, v[13:16], off offset:32
; GFX11-NEXT: scratch_store_b128 v0, v[1:4], off offset:48
-; GFX11-NEXT: v_readlane_b32 s49, v40, 9
; GFX11-NEXT: v_readlane_b32 s48, v40, 8
; GFX11-NEXT: v_readlane_b32 s39, v40, 7
; GFX11-NEXT: v_readlane_b32 s38, v40, 6
@@ -81811,6 +82218,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v20
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_readfirstlane_b32 s9, v31
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:64
@@ -82235,7 +82643,9 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB107_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB107_2
+; SI-NEXT: s_branch .LBB107_3
;
; VI-LABEL: bitcast_v64i8_to_v32f16_scalar:
; VI: ; %bb.0:
@@ -82264,11 +82674,11 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:16
; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:24
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:32
; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:28
; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40
@@ -82278,12 +82688,12 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56
; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72
; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:68
-; VI-NEXT: v_mov_b32_e32 v51, v23
+; VI-NEXT: v_mov_b32_e32 v52, v23
; VI-NEXT: v_mov_b32_e32 v30, v26
-; VI-NEXT: v_mov_b32_e32 v26, v22
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5
@@ -82294,20 +82704,20 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v15
; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v17
; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v19
-; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v21
-; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v51
+; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v21
+; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v52
; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v25
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v29
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v31
; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v33
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v35
; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v37
; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v20
@@ -82316,11 +82726,11 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: s_waitcnt vmcnt(9)
-; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v39
+; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v39
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v48
+; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v48
; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v49
+; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v49
; VI-NEXT: s_cbranch_scc0 .LBB107_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_or_b32_sdwa v0, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -82330,27 +82740,27 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v1, v34, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v35, v6
; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v53, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v16, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v52, v3
; VI-NEXT: v_mov_b32_e32 v49, v7
; VI-NEXT: v_or_b32_sdwa v3, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v18, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v55, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v55, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v37, v8
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v26, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v22, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v24, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v40, v9
+; VI-NEXT: v_mov_b32_e32 v41, v9
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v30, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v28, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v31, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v51, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v17, v11
; VI-NEXT: v_mov_b32_e32 v19, v13
; VI-NEXT: s_and_b32 s4, s28, 0xff
@@ -82366,23 +82776,22 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: v_mov_b32_e32 v25, v23
-; VI-NEXT: v_mov_b32_e32 v48, v51
-; VI-NEXT: v_mov_b32_e32 v23, v26
-; VI-NEXT: v_mov_b32_e32 v26, v30
+; VI-NEXT: v_mov_b32_e32 v48, v35
+; VI-NEXT: v_mov_b32_e32 v23, v30
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v34, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v54, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v41, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v42, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v43, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v45, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v47, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v57, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v46, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v57, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v32, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -82423,12 +82832,12 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
; VI-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x300, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v41
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40
; VI-NEXT: v_or_b32_sdwa v12, v62, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54
; VI-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v51
; VI-NEXT: v_or_b32_sdwa v11, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30
@@ -82436,18 +82845,18 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28
; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45
; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v2, v26, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v24
; VI-NEXT: v_or_b32_sdwa v9, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v22
; VI-NEXT: v_or_b32_sdwa v3, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55
-; VI-NEXT: v_or_b32_sdwa v8, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v8, v47, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v18
; VI-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x300, v3
@@ -82459,15 +82868,11 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v36
; VI-NEXT: v_or_b32_sdwa v6, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31
-; VI-NEXT: v_or_b32_sdwa v3, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v3, v41, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37
; VI-NEXT: v_or_b32_sdwa v5, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35
-; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
@@ -82496,13 +82901,12 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s10, s16, 0xff
; VI-NEXT: s_lshl_b32 s11, s17, 8
; VI-NEXT: s_or_b32 s10, s11, s10
-; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v46
; VI-NEXT: s_addk_i32 s6, 0x300
; VI-NEXT: s_addk_i32 s8, 0x300
; VI-NEXT: s_addk_i32 s10, 0x300
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
; VI-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -82510,8 +82914,8 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_and_b32 s10, s10, 0xffff
; VI-NEXT: s_and_b32 s8, s8, 0xffff
; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
-; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s9, s9, s10
; VI-NEXT: s_or_b32 s7, s7, s8
; VI-NEXT: s_or_b32 s5, s5, s6
@@ -82519,7 +82923,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_add_i32 s9, s9, 0x3000000
; VI-NEXT: s_add_i32 s7, s7, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
-; VI-NEXT: v_or_b32_sdwa v5, v5, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v6, v6, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v7, v7, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v8, v8, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -82530,25 +82933,31 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: v_or_b32_sdwa v13, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v5, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v2
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0
; VI-NEXT: v_mov_b32_e32 v0, s9
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s5
+; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v4, v52, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -82580,23 +82989,24 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB107_4:
; VI-NEXT: v_mov_b32_e32 v25, v23
-; VI-NEXT: v_mov_b32_e32 v23, v26
-; VI-NEXT: v_mov_b32_e32 v26, v30
+; VI-NEXT: v_mov_b32_e32 v23, v30
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v48, v51
+; VI-NEXT: v_mov_b32_e32 v48, v35
; VI-NEXT: v_mov_b32_e32 v31, v10
; VI-NEXT: v_mov_b32_e32 v36, v34
-; VI-NEXT: v_mov_b32_e32 v35, v6
; VI-NEXT: v_mov_b32_e32 v37, v8
; VI-NEXT: v_mov_b32_e32 v39, v14
; VI-NEXT: v_mov_b32_e32 v21, v15
; VI-NEXT: v_mov_b32_e32 v19, v13
; VI-NEXT: v_mov_b32_e32 v17, v11
-; VI-NEXT: v_mov_b32_e32 v40, v9
+; VI-NEXT: v_mov_b32_e32 v41, v9
; VI-NEXT: v_mov_b32_e32 v49, v7
; VI-NEXT: v_mov_b32_e32 v20, v5
+; VI-NEXT: v_mov_b32_e32 v52, v3
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB107_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB107_2
+; VI-NEXT: s_branch .LBB107_3
;
; GFX9-LABEL: bitcast_v64i8_to_v32f16_scalar:
; GFX9: ; %bb.0:
@@ -82623,9 +83033,9 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:8
; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:32
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:28
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:40
@@ -82644,6 +83054,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v9
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v13
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v17
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
@@ -82660,7 +83071,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v32
; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v38
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v30
; GFX9-NEXT: s_waitcnt vmcnt(13)
@@ -82715,14 +83126,13 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v0, v18, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v32, v16
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: v_mov_b32_e32 v16, v22
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v37, v24
; GFX9-NEXT: v_or_b32_sdwa v1, v24, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v27, v25
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_mov_b32_e32 v17, v9
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
@@ -82739,12 +83149,12 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
-; GFX9-NEXT: v_mov_b32_e32 v55, v11
+; GFX9-NEXT: v_mov_b32_e32 v41, v11
; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v41, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v40, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v55, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s21, 8
@@ -82775,25 +83185,26 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v2
; GFX9-NEXT: v_mov_b32_e32 v42, v15
-; GFX9-NEXT: v_mov_b32_e32 v27, v25
+; GFX9-NEXT: v_mov_b32_e32 v32, v16
; GFX9-NEXT: v_mov_b32_e32 v30, v18
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v49, v20
+; GFX9-NEXT: v_mov_b32_e32 v16, v22
; GFX9-NEXT: v_mov_b32_e32 v39, v26
; GFX9-NEXT: v_mov_b32_e32 v35, v28
; GFX9-NEXT: v_mov_b32_e32 v54, v31
; GFX9-NEXT: v_mov_b32_e32 v31, v51
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v57, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_mov_b32_e32 v18, v22
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v56, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v56, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_mov_b32_e32 v20, v24
+; GFX9-NEXT: v_mov_b32_e32 v18, v24
+; GFX9-NEXT: v_mov_b32_e32 v20, v25
; GFX9-NEXT: s_cbranch_execnz .LBB107_3
; GFX9-NEXT: .LBB107_2: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v3, 3, v45
@@ -82805,10 +83216,10 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_add_u32_e32 v3, 3, v43
; GFX9-NEXT: v_or_b32_sdwa v3, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v15, 0x300, v3
-; GFX9-NEXT: v_add_u32_e32 v3, 3, v41
+; GFX9-NEXT: v_add_u32_e32 v3, 3, v40
; GFX9-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v12, 0x300, v3
-; GFX9-NEXT: v_add_u32_e32 v3, 3, v40
+; GFX9-NEXT: v_add_u32_e32 v3, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v34
@@ -82842,7 +83253,7 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v48
-; GFX9-NEXT: v_or_b32_sdwa v3, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v3, v41, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v33
; GFX9-NEXT: v_or_b32_sdwa v3, v17, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -82996,11 +83407,13 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v42, v15
; GFX9-NEXT: v_mov_b32_e32 v19, v13
-; GFX9-NEXT: v_mov_b32_e32 v55, v11
+; GFX9-NEXT: v_mov_b32_e32 v41, v11
; GFX9-NEXT: v_mov_b32_e32 v17, v9
; GFX9-NEXT: v_mov_b32_e32 v50, v3
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB107_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB107_2
+; GFX9-NEXT: s_branch .LBB107_3
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v32f16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -83041,7 +83454,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 8, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
@@ -83060,45 +83472,46 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v81, 8, v14
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 8, v84
-; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB107_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s11
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
@@ -83166,10 +83579,9 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB107_3
; GFX11-TRUE16-NEXT: .LBB107_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
@@ -83345,7 +83757,9 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB107_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB107_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB107_2
+; GFX11-TRUE16-NEXT: s_branch .LBB107_3
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v32f16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -83386,7 +83800,6 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v25, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v85, 8, v0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(13)
@@ -83405,45 +83818,46 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v81, 8, v14
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v84, 8, v84
-; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB107_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-FAKE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v1, 0xffff, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s12
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s11
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v31
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v37
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v33
@@ -83511,10 +83925,9 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB107_3
; GFX11-FAKE16-NEXT: .LBB107_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
@@ -83690,7 +84103,9 @@ define inreg <32 x half> @bitcast_v64i8_to_v32f16_scalar(<64 x i8> inreg %a, i32
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB107_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB107_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB107_2
+; GFX11-FAKE16-NEXT: s_branch .LBB107_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -86874,6 +87289,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v19
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -86890,7 +87306,6 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v62, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mul_f32_e64 v19, 1.0, s17
; SI-NEXT: v_mul_f32_e32 v42, 1.0, v2
; SI-NEXT: v_mul_f32_e32 v20, 1.0, v1
@@ -86904,12 +87319,13 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e32 v29, 1.0, v9
; SI-NEXT: v_mul_f32_e32 v12, 1.0, v12
; SI-NEXT: v_mul_f32_e32 v35, 1.0, v11
-; SI-NEXT: v_mul_f32_e32 v56, 1.0, v14
+; SI-NEXT: v_mul_f32_e32 v47, 1.0, v14
; SI-NEXT: v_mul_f32_e32 v33, 1.0, v13
; SI-NEXT: v_mul_f32_e32 v36, 1.0, v16
; SI-NEXT: v_mul_f32_e32 v39, 1.0, v15
; SI-NEXT: v_mul_f32_e32 v48, 1.0, v18
-; SI-NEXT: v_mul_f32_e32 v32, 1.0, v17
+; SI-NEXT: v_mul_f32_e32 v49, 1.0, v17
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v22, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s18
@@ -86927,8 +87343,8 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
@@ -87001,7 +87417,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v31
; SI-NEXT: v_alignbit_b32 v3, v1, v34, 16
; SI-NEXT: v_lshrrev_b32_e32 v1, 16, v12
-; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v56
+; SI-NEXT: v_lshrrev_b32_e32 v7, 16, v47
; SI-NEXT: v_alignbit_b32 v2, v1, v35, 16
; SI-NEXT: v_alignbit_b32 v8, v7, v33, 16
; SI-NEXT: v_alignbit_b32 v4, v8, v2, 24
@@ -87012,55 +87428,55 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_alignbit_b32 v1, v1, v39, 16
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v43
; SI-NEXT: v_lshrrev_b32_e32 v12, 16, v44
-; SI-NEXT: v_alignbit_b32 v5, v4, v32, 16
+; SI-NEXT: v_alignbit_b32 v5, v4, v49, 16
; SI-NEXT: v_mov_b32_e32 v31, v23
; SI-NEXT: v_alignbit_b32 v20, v18, v23, 16
; SI-NEXT: v_alignbit_b32 v14, v12, v29, 16
; SI-NEXT: v_alignbit_b32 v23, v5, v1, 24
; SI-NEXT: v_mov_b32_e32 v38, v36
; SI-NEXT: v_alignbit_b32 v36, v20, v6, 24
-; SI-NEXT: v_alignbit_b32 v25, v14, v3, 24
-; SI-NEXT: v_alignbit_b32 v50, v8, v2, 16
-; SI-NEXT: v_mov_b32_e32 v53, v32
+; SI-NEXT: v_alignbit_b32 v53, v14, v3, 24
+; SI-NEXT: v_alignbit_b32 v32, v8, v2, 16
; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v23, v5, v1, 16
-; SI-NEXT: v_alignbit_b32 v32, v5, v1, 8
+; SI-NEXT: v_alignbit_b32 v50, v5, v1, 8
; SI-NEXT: v_alignbit_b32 v55, v20, v6, 16
; SI-NEXT: v_alignbit_b32 v40, v20, v6, 8
; SI-NEXT: v_mov_b32_e32 v35, v29
; SI-NEXT: v_alignbit_b32 v52, v14, v3, 16
-; SI-NEXT: v_alignbit_b32 v54, v14, v3, 8
+; SI-NEXT: v_alignbit_b32 v25, v14, v3, 8
; SI-NEXT: v_mov_b32_e32 v37, v33
; SI-NEXT: v_alignbit_b32 v51, v8, v2, 8
+; SI-NEXT: v_mov_b32_e32 v54, v49
; SI-NEXT: buffer_store_dword v23, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v22
; SI-NEXT: v_lshrrev_b32_e32 v62, 8, v30
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_mov_b32_e32 v23, v41
-; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v41
-; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v19
+; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v41
+; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v19
; SI-NEXT: v_mov_b32_e32 v28, v26
-; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v26
+; SI-NEXT: v_lshrrev_b32_e32 v46, 24, v26
; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v16
; SI-NEXT: v_mov_b32_e32 v26, v42
-; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v42
+; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v42
; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v11
; SI-NEXT: v_mov_b32_e32 v29, v43
; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v43
-; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v20
+; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v20
; SI-NEXT: v_mov_b32_e32 v34, v44
; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v44
; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v14
-; SI-NEXT: v_mov_b32_e32 v33, v56
-; SI-NEXT: v_lshrrev_b32_e32 v43, 24, v56
-; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v8
+; SI-NEXT: v_mov_b32_e32 v33, v47
+; SI-NEXT: v_lshrrev_b32_e32 v43, 24, v47
+; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v8
; SI-NEXT: v_mov_b32_e32 v49, v48
; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v48
-; SI-NEXT: v_mov_b32_e32 v48, v32
-; SI-NEXT: v_mov_b32_e32 v32, v50
-; SI-NEXT: v_mov_b32_e32 v50, v25
-; SI-NEXT: v_mov_b32_e32 v25, v36
+; SI-NEXT: v_mov_b32_e32 v48, v50
+; SI-NEXT: v_mov_b32_e32 v50, v32
+; SI-NEXT: v_mov_b32_e32 v32, v53
+; SI-NEXT: v_mov_b32_e32 v53, v36
; SI-NEXT: v_mov_b32_e32 v36, v38
; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v5
; SI-NEXT: s_cbranch_execnz .LBB109_3
@@ -87072,7 +87488,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v2
; SI-NEXT: v_and_b32_e32 v3, 0xffff0000, v49
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v53
+; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v54
; SI-NEXT: v_add_f32_e32 v42, 0x40c00000, v3
; SI-NEXT: v_add_f32_e32 v2, 0x40c00000, v2
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v42
@@ -87089,15 +87505,18 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_add_f32_e32 v45, 0x40c00000, v10
; SI-NEXT: v_lshrrev_b32_e32 v18, 16, v45
; SI-NEXT: v_alignbit_b32 v48, v5, v1, 8
+; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v45
+; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v44
; SI-NEXT: v_lshrrev_b32_e32 v43, 24, v43
; SI-NEXT: v_lshrrev_b32_e32 v42, 24, v42
+; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v5
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_and_b32_e32 v2, 0xffff0000, v2
@@ -87111,7 +87530,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_add_f32_e32 v3, 0x40c00000, v3
; SI-NEXT: v_alignbit_b32 v8, v7, v3, 16
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: v_alignbit_b32 v32, v8, v2, 16
+; SI-NEXT: v_alignbit_b32 v50, v8, v2, 16
; SI-NEXT: v_alignbit_b32 v51, v8, v2, 8
; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
@@ -87121,23 +87540,23 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v9
-; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
+; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
+; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
+; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
+; SI-NEXT: v_alignbit_b32 v15, v15, v13, 16
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_and_b32_e32 v10, 0xffff0000, v10
; SI-NEXT: v_add_f32_e32 v10, 0x40c00000, v10
; SI-NEXT: v_lshrrev_b32_e32 v10, 16, v10
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_and_b32_e32 v15, 0xffff0000, v15
; SI-NEXT: v_and_b32_e32 v19, 0xffff0000, v19
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
-; SI-NEXT: v_add_f32_e32 v15, 0x40c00000, v15
; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v19
-; SI-NEXT: v_add_f32_e32 v13, 0x40c00000, v13
-; SI-NEXT: v_lshrrev_b32_e32 v15, 16, v15
; SI-NEXT: v_add_f32_e32 v17, 0x40c00000, v17
; SI-NEXT: v_lshrrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_alignbit_b32 v15, v15, v13, 16
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: v_alignbit_b32 v21, v19, v17, 16
; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
@@ -87148,32 +87567,32 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
; SI-NEXT: v_alignbit_b32 v14, v12, v6, 16
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: v_alignbit_b32 v50, v14, v3, 24
+; SI-NEXT: v_alignbit_b32 v32, v14, v3, 24
; SI-NEXT: v_alignbit_b32 v52, v14, v3, 16
-; SI-NEXT: v_alignbit_b32 v54, v14, v3, 8
+; SI-NEXT: v_alignbit_b32 v25, v14, v3, 8
; SI-NEXT: v_lshrrev_b32_e32 v60, 8, v14
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v13
; SI-NEXT: v_add_f32_e32 v16, 0x40c00000, v13
+; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v28
+; SI-NEXT: v_add_f32_e32 v46, 0x40c00000, v13
+; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v46
+; SI-NEXT: v_lshrrev_b32_e32 v46, 24, v46
+; SI-NEXT: v_alignbit_b32 v16, v13, v16, 16
+; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v16
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v17
; SI-NEXT: v_add_f32_e32 v19, 0x40c00000, v17
; SI-NEXT: v_and_b32_e32 v17, 0xffff0000, v23
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; SI-NEXT: v_and_b32_e32 v13, 0xffff0000, v28
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
-; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v17
-; SI-NEXT: v_add_f32_e32 v56, 0x40c00000, v13
-; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v41
-; SI-NEXT: v_lshrrev_b32_e32 v13, 16, v56
-; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v41
-; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v56
-; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v8
+; SI-NEXT: v_add_f32_e32 v47, 0x40c00000, v17
+; SI-NEXT: v_lshrrev_b32_e32 v17, 16, v47
+; SI-NEXT: v_lshrrev_b32_e32 v61, 24, v47
+; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v8
; SI-NEXT: v_alignbit_b32 v19, v17, v19, 16
-; SI-NEXT: v_alignbit_b32 v16, v13, v16, 16
-; SI-NEXT: v_lshrrev_b32_e32 v46, 8, v19
-; SI-NEXT: v_lshrrev_b32_e32 v57, 8, v16
+; SI-NEXT: v_lshrrev_b32_e32 v63, 8, v19
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v6, 0xffff0000, v6
; SI-NEXT: v_add_f32_e32 v6, 0x40c00000, v6
@@ -87182,7 +87601,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_alignbit_b32 v20, v18, v9, 16
; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: v_alignbit_b32 v25, v20, v6, 24
+; SI-NEXT: v_alignbit_b32 v53, v20, v6, 24
; SI-NEXT: v_alignbit_b32 v55, v20, v6, 16
; SI-NEXT: v_alignbit_b32 v40, v20, v6, 8
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -87196,14 +87615,14 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: v_add_f32_e32 v9, 0x40c00000, v9
; SI-NEXT: v_alignbit_b32 v10, v10, v9, 16
-; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v9, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_and_b32_e32 v23, 0xffff0000, v23
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_and_b32_e32 v22, 0xffff0000, v22
-; SI-NEXT: v_add_f32_e32 v59, 0x40c00000, v23
+; SI-NEXT: v_add_f32_e32 v56, 0x40c00000, v23
; SI-NEXT: v_add_f32_e32 v22, 0x40c00000, v22
-; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v59
+; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v56
; SI-NEXT: v_alignbit_b32 v30, v24, v22, 16
; SI-NEXT: v_alignbit_b32 v22, v30, v27, 24
; SI-NEXT: v_lshrrev_b32_e32 v62, 8, v30
@@ -87236,14 +87655,13 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v9
; SI-NEXT: v_add_f32_e32 v11, 0x40c00000, v9
; SI-NEXT: v_and_b32_e32 v9, 0xffff0000, v26
-; SI-NEXT: v_add_f32_e32 v47, 0x40c00000, v9
-; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v47
+; SI-NEXT: v_add_f32_e32 v41, 0x40c00000, v9
+; SI-NEXT: v_lshrrev_b32_e32 v9, 16, v41
; SI-NEXT: v_alignbit_b32 v11, v9, v11, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v22, v11, v10, 24
; SI-NEXT: v_lshrrev_b32_e32 v58, 8, v11
-; SI-NEXT: v_lshrrev_b32_e32 v63, 24, v47
-; SI-NEXT: v_lshrrev_b32_e32 v47, 8, v20
+; SI-NEXT: v_lshrrev_b32_e32 v41, 24, v41
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_alignbit_b32 v22, v11, v10, 16
@@ -87261,10 +87679,8 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_alignbit_b32 v22, v5, v1, 16
; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:116 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v59
-; SI-NEXT: v_lshrrev_b32_e32 v59, 24, v45
-; SI-NEXT: v_lshrrev_b32_e32 v45, 24, v44
-; SI-NEXT: v_lshrrev_b32_e32 v44, 8, v5
+; SI-NEXT: v_lshrrev_b32_e32 v22, 24, v56
+; SI-NEXT: v_lshrrev_b32_e32 v56, 8, v20
; SI-NEXT: .LBB109_3: ; %end
; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:108 ; 4-byte Folded Reload
; SI-NEXT: v_and_b32_e32 v27, 0xff, v27
@@ -87326,9 +87742,9 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_add_i32_e32 v22, vcc, 8, v0
; SI-NEXT: buffer_store_dword v21, v22, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v46
+; SI-NEXT: v_lshlrev_b32_e32 v21, 8, v63
; SI-NEXT: v_or_b32_e32 v19, v19, v21
-; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v41
+; SI-NEXT: v_lshlrev_b32_e32 v21, 24, v61
; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19
; SI-NEXT: v_or_b32_e32 v17, v21, v17
; SI-NEXT: v_or_b32_e32 v17, v19, v17
@@ -87355,7 +87771,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_and_b32_e32 v15, 0xff, v16
; SI-NEXT: v_lshlrev_b32_e32 v16, 8, v57
; SI-NEXT: v_or_b32_e32 v15, v15, v16
-; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v61
+; SI-NEXT: v_lshlrev_b32_e32 v16, 24, v46
; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15
; SI-NEXT: v_or_b32_e32 v13, v16, v13
; SI-NEXT: v_or_b32_e32 v13, v15, v13
@@ -87382,7 +87798,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_and_b32_e32 v10, 0xff, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 8, v58
; SI-NEXT: v_or_b32_e32 v10, v10, v11
-; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v63
+; SI-NEXT: v_lshlrev_b32_e32 v11, 24, v41
; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
; SI-NEXT: v_or_b32_e32 v9, v11, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
@@ -87393,7 +87809,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_or_b32_e32 v6, v6, v9
; SI-NEXT: v_and_b32_e32 v9, 0xff, v55
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
-; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v25
+; SI-NEXT: v_lshlrev_b32_e32 v10, 24, v53
; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6
; SI-NEXT: v_or_b32_e32 v9, v10, v9
; SI-NEXT: v_or_b32_e32 v6, v6, v9
@@ -87401,7 +87817,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: buffer_store_dword v6, v9, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v6, 0xff, v20
-; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v47
+; SI-NEXT: v_lshlrev_b32_e32 v9, 8, v56
; SI-NEXT: v_or_b32_e32 v6, v6, v9
; SI-NEXT: v_and_b32_e32 v9, 0xff, v18
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
@@ -87412,11 +87828,11 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_add_i32_e32 v9, vcc, 36, v0
; SI-NEXT: buffer_store_dword v6, v9, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v54
+; SI-NEXT: v_lshlrev_b32_e32 v6, 8, v25
; SI-NEXT: v_or_b32_e32 v3, v3, v6
; SI-NEXT: v_and_b32_e32 v6, 0xff, v52
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v50
+; SI-NEXT: v_lshlrev_b32_e32 v9, 24, v32
; SI-NEXT: v_and_b32_e32 v3, 0xffff, v3
; SI-NEXT: v_or_b32_e32 v6, v9, v6
; SI-NEXT: v_or_b32_e32 v3, v3, v6
@@ -87438,7 +87854,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v51
; SI-NEXT: v_or_b32_e32 v2, v2, v3
-; SI-NEXT: v_and_b32_e32 v3, 0xff, v32
+; SI-NEXT: v_and_b32_e32 v3, 0xff, v50
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_and_b32_e32 v2, 0xffff, v2
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -87449,7 +87865,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: buffer_store_dword v2, v3, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v2, 0xff, v8
-; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v56
+; SI-NEXT: v_lshlrev_b32_e32 v3, 8, v47
; SI-NEXT: v_or_b32_e32 v2, v2, v3
; SI-NEXT: v_and_b32_e32 v3, 0xff, v7
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
@@ -87508,7 +87924,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr4
-; SI-NEXT: v_mov_b32_e32 v53, v32
+; SI-NEXT: v_mov_b32_e32 v54, v49
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: v_mov_b32_e32 v49, v48
@@ -87517,7 +87933,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_mov_b32_e32 v37, v33
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
-; SI-NEXT: v_mov_b32_e32 v33, v56
+; SI-NEXT: v_mov_b32_e32 v33, v47
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: s_waitcnt expcnt(0)
@@ -87542,7 +87958,6 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: v_mov_b32_e32 v23, v41
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr1
-; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; kill: killed $vgpr4
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr27
@@ -87552,41 +87967,40 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr19
-; SI-NEXT: ; implicit-def: $vgpr46
+; SI-NEXT: ; implicit-def: $vgpr63
; SI-NEXT: ; implicit-def: $vgpr17
-; SI-NEXT: ; implicit-def: $vgpr41
+; SI-NEXT: ; implicit-def: $vgpr61
; SI-NEXT: ; implicit-def: $vgpr15
; SI-NEXT: ; implicit-def: $vgpr16
; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr13
-; SI-NEXT: ; implicit-def: $vgpr61
+; SI-NEXT: ; implicit-def: $vgpr46
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
; SI-NEXT: ; implicit-def: $vgpr58
; SI-NEXT: ; implicit-def: $vgpr9
-; SI-NEXT: ; implicit-def: $vgpr63
+; SI-NEXT: ; implicit-def: $vgpr41
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr40
; SI-NEXT: ; implicit-def: $vgpr55
-; SI-NEXT: ; implicit-def: $vgpr25
+; SI-NEXT: ; implicit-def: $vgpr53
; SI-NEXT: ; implicit-def: $vgpr20
-; SI-NEXT: ; implicit-def: $vgpr47
+; SI-NEXT: ; implicit-def: $vgpr56
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr59
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: ; implicit-def: $vgpr54
+; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr52
-; SI-NEXT: ; implicit-def: $vgpr50
+; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr12
; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr51
-; SI-NEXT: ; implicit-def: $vgpr32
-; SI-NEXT: ; kill: killed $vgpr1
+; SI-NEXT: ; implicit-def: $vgpr50
; SI-NEXT: ; implicit-def: $vgpr8
-; SI-NEXT: ; implicit-def: $vgpr56
+; SI-NEXT: ; implicit-def: $vgpr47
; SI-NEXT: ; implicit-def: $vgpr7
; SI-NEXT: ; implicit-def: $vgpr43
; SI-NEXT: ; implicit-def: $vgpr48
@@ -87595,8 +88009,12 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: ; kill: killed $vgpr1
; SI-NEXT: ; implicit-def: $vgpr42
-; SI-NEXT: s_branch .LBB109_2
+; SI-NEXT: ; implicit-def: $vgpr1
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB109_2
+; SI-NEXT: s_branch .LBB109_3
;
; VI-LABEL: bitcast_v32bf16_to_v64i8_scalar:
; VI: ; %bb.0:
@@ -87626,8 +88044,9 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; VI-NEXT: v_writelane_b32 v63, s67, 19
; VI-NEXT: v_readfirstlane_b32 s4, v1
-; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s5, v2
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
+; VI-NEXT: s_mov_b64 s[46:47], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
@@ -88086,7 +88505,8 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; VI-NEXT: ; implicit-def: $sgpr59
; VI-NEXT: ; implicit-def: $sgpr57
; VI-NEXT: ; implicit-def: $sgpr56
-; VI-NEXT: s_branch .LBB109_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; VI-NEXT: s_cbranch_vccz .LBB109_2
; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v19, s44
; VI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
@@ -88338,8 +88758,9 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v3
; GFX9-NEXT: v_writelane_b32 v4, s54, 14
; GFX9-NEXT: v_readfirstlane_b32 s4, v1
-; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: v_readfirstlane_b32 s5, v2
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[46:47], -1
; GFX9-NEXT: v_writelane_b32 v4, s55, 15
; GFX9-NEXT: s_cbranch_scc0 .LBB109_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
@@ -89049,7 +89470,9 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX9-NEXT: ; implicit-def: $sgpr93
; GFX9-NEXT: ; implicit-def: $sgpr91
; GFX9-NEXT: ; implicit-def: $sgpr92
-; GFX9-NEXT: s_branch .LBB109_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[46:47]
+; GFX9-NEXT: s_cbranch_vccz .LBB109_2
+; GFX9-NEXT: s_branch .LBB109_3
;
; GFX11-LABEL: bitcast_v32bf16_to_v64i8_scalar:
; GFX11: ; %bb.0:
@@ -89059,7 +89482,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v17, s30, 0
; GFX11-NEXT: s_cmp_lg_u32 s28, 0
-; GFX11-NEXT: s_mov_b32 vcc_lo, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: v_writelane_b32 v17, s31, 1
; GFX11-NEXT: v_writelane_b32 v17, s34, 2
; GFX11-NEXT: v_writelane_b32 v17, s35, 3
@@ -89072,6 +89495,7 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-NEXT: v_writelane_b32 v17, s51, 10
; GFX11-NEXT: s_cbranch_scc0 .LBB109_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[24:25], 24
; GFX11-NEXT: s_lshr_b32 s62, s27, 24
; GFX11-NEXT: s_lshr_b32 s61, s27, 16
; GFX11-NEXT: s_lshr_b32 s63, s27, 8
@@ -89113,15 +89537,13 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-NEXT: s_lshr_b32 s48, s0, 16
; GFX11-NEXT: s_lshr_b32 s42, s0, 8
; GFX11-NEXT: s_lshr_b64 s[10:11], s[26:27], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[24:25], 24
; GFX11-NEXT: s_lshr_b64 s[6:7], s[22:23], 24
; GFX11-NEXT: s_lshr_b64 s[8:9], s[20:21], 24
; GFX11-NEXT: s_lshr_b64 s[12:13], s[18:19], 24
; GFX11-NEXT: s_lshr_b64 s[14:15], s[16:17], 24
; GFX11-NEXT: s_lshr_b64 s[28:29], s[2:3], 24
; GFX11-NEXT: s_lshr_b64 s[40:41], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, vcc_lo
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_3
+; GFX11-NEXT: s_cbranch_execnz .LBB109_3
; GFX11-NEXT: .LBB109_2: ; %cmp.true
; GFX11-NEXT: s_and_b32 s4, s1, 0xffff0000
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(VALU_DEP_1)
@@ -89787,7 +90209,9 @@ define inreg <64 x i8> @bitcast_v32bf16_to_v64i8_scalar(<32 x bfloat> inreg %a,
; GFX11-NEXT: ; implicit-def: $sgpr63
; GFX11-NEXT: ; implicit-def: $sgpr61
; GFX11-NEXT: ; implicit-def: $sgpr62
-; GFX11-NEXT: s_branch .LBB109_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB109_2
+; GFX11-NEXT: s_branch .LBB109_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -92315,7 +92739,8 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:36
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:72
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:68
+; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:68
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_readfirstlane_b32 s46, v30
; SI-NEXT: v_readfirstlane_b32 s44, v23
; SI-NEXT: v_readfirstlane_b32 s45, v22
@@ -92339,7 +92764,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s57, v34
; SI-NEXT: v_readfirstlane_b32 s47, v35
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v36
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[8:9], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v44, 24, v37
; SI-NEXT: s_waitcnt vmcnt(9)
@@ -92347,7 +92772,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v30, 24, v39
; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_lshlrev_b32_e32 v42, 24, v48
+; SI-NEXT: v_lshlrev_b32_e32 v43, 24, v48
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v45, 24, v45
; SI-NEXT: s_cbranch_scc0 .LBB111_3
@@ -92424,7 +92849,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: s_lshl_b32 s63, s4, 16
; SI-NEXT: s_and_b32 s4, s57, 0xff
; SI-NEXT: s_lshl_b32 s5, s56, 8
-; SI-NEXT: v_or_b32_e32 v34, v42, v29
+; SI-NEXT: v_or_b32_e32 v34, v43, v29
; SI-NEXT: v_and_b32_e32 v29, 0xff, v55
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v15, 24, v11
@@ -92448,7 +92873,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: s_and_b32 s4, s59, 0xff
; SI-NEXT: s_lshl_b32 s5, s58, 8
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_and_b32_e32 v31, 0xff, v43
+; SI-NEXT: v_and_b32_e32 v31, 0xff, v42
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
@@ -92475,7 +92900,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: s_and_b32 s4, s59, 0xff
; SI-NEXT: s_lshl_b32 s5, s58, 8
; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v43
+; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v42
; SI-NEXT: s_or_b32 s4, s5, s4
; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
; SI-NEXT: s_addk_i32 s4, 0x300
@@ -92483,7 +92908,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: s_and_b32 s4, s4, 0xffff
; SI-NEXT: v_or_b32_e32 v9, v45, v9
; SI-NEXT: v_or_b32_e32 v9, s4, v9
-; SI-NEXT: v_add_i32_e32 v43, vcc, 0x3000000, v9
+; SI-NEXT: v_add_i32_e32 v42, vcc, 0x3000000, v9
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v55
; SI-NEXT: v_and_b32_e32 v9, 0xff, v9
; SI-NEXT: v_lshlrev_b32_e32 v13, 8, v54
@@ -92505,7 +92930,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: s_addk_i32 s4, 0x300
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: s_and_b32 s4, s4, 0xffff
-; SI-NEXT: v_or_b32_e32 v9, v42, v9
+; SI-NEXT: v_or_b32_e32 v9, v43, v9
; SI-NEXT: v_or_b32_e32 v9, s4, v9
; SI-NEXT: v_add_i32_e32 v29, vcc, 0x3000000, v9
; SI-NEXT: v_add_i32_e32 v9, vcc, 3, v52
@@ -92698,8 +93123,8 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v29
; SI-NEXT: v_and_b32_e32 v29, 0xffff0000, v31
; SI-NEXT: v_lshlrev_b32_e32 v35, 16, v31
-; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v43
-; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v43
+; SI-NEXT: v_and_b32_e32 v31, 0xffff0000, v42
+; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v42
; SI-NEXT: s_branch .LBB111_5
; SI-NEXT: .LBB111_3:
; SI-NEXT: ; implicit-def: $sgpr8
@@ -92734,7 +93159,8 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $sgpr73
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB111_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB111_2
; SI-NEXT: .LBB111_4:
; SI-NEXT: v_mov_b32_e32 v10, s60
; SI-NEXT: v_mov_b32_e32 v14, s61
@@ -92796,11 +93222,11 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: buffer_load_dword v0, off, s[0:3], s32 offset:76
; VI-NEXT: buffer_load_ushort v31, off, s[0:3], s32
; VI-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:8
-; VI-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4
+; VI-NEXT: buffer_load_ushort v51, off, s[0:3], s32 offset:4
; VI-NEXT: buffer_load_ushort v35, off, s[0:3], s32 offset:16
; VI-NEXT: buffer_load_ushort v54, off, s[0:3], s32 offset:12
; VI-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:24
-; VI-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:20
+; VI-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20
; VI-NEXT: buffer_load_ushort v20, off, s[0:3], s32 offset:32
; VI-NEXT: buffer_load_ushort v42, off, s[0:3], s32 offset:28
; VI-NEXT: buffer_load_ushort v12, off, s[0:3], s32 offset:40
@@ -92810,12 +93236,12 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: buffer_load_ushort v39, off, s[0:3], s32 offset:56
; VI-NEXT: buffer_load_ushort v45, off, s[0:3], s32 offset:52
; VI-NEXT: buffer_load_ushort v48, off, s[0:3], s32 offset:64
-; VI-NEXT: buffer_load_ushort v47, off, s[0:3], s32 offset:60
+; VI-NEXT: buffer_load_ushort v46, off, s[0:3], s32 offset:60
; VI-NEXT: buffer_load_ushort v49, off, s[0:3], s32 offset:72
; VI-NEXT: buffer_load_ushort v57, off, s[0:3], s32 offset:68
-; VI-NEXT: v_mov_b32_e32 v51, v23
+; VI-NEXT: v_mov_b32_e32 v52, v23
; VI-NEXT: v_mov_b32_e32 v30, v26
-; VI-NEXT: v_mov_b32_e32 v26, v22
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshlrev_b32_e32 v50, 8, v1
; VI-NEXT: v_lshlrev_b32_e32 v1, 8, v3
; VI-NEXT: v_lshlrev_b32_e32 v3, 8, v5
@@ -92826,20 +93252,20 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v13, 8, v15
; VI-NEXT: v_lshlrev_b32_e32 v15, 8, v17
; VI-NEXT: v_lshlrev_b32_e32 v23, 8, v19
-; VI-NEXT: v_lshlrev_b32_e32 v46, 8, v21
-; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v51
+; VI-NEXT: v_lshlrev_b32_e32 v47, 8, v21
+; VI-NEXT: v_lshlrev_b32_e32 v56, 8, v52
; VI-NEXT: v_lshlrev_b32_e32 v58, 8, v25
; VI-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; VI-NEXT: v_lshlrev_b32_e32 v29, 8, v29
; VI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
-; VI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
+; VI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; VI-NEXT: s_waitcnt vmcnt(14)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; VI-NEXT: v_lshlrev_b32_e32 v59, 8, v31
; VI-NEXT: v_lshlrev_b32_e32 v60, 8, v33
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_and_b64 s[6:7], vcc, exec
; VI-NEXT: v_lshlrev_b32_e32 v61, 8, v35
; VI-NEXT: v_lshlrev_b32_e32 v62, 8, v37
; VI-NEXT: v_lshlrev_b32_e32 v63, 8, v20
@@ -92848,11 +93274,11 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: s_waitcnt vmcnt(11)
; VI-NEXT: v_lshlrev_b32_e32 v14, 8, v14
; VI-NEXT: s_waitcnt vmcnt(9)
-; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v39
+; VI-NEXT: v_lshlrev_b32_e32 v26, 8, v39
; VI-NEXT: s_waitcnt vmcnt(7)
-; VI-NEXT: v_lshlrev_b32_e32 v51, 8, v48
+; VI-NEXT: v_lshlrev_b32_e32 v35, 8, v48
; VI-NEXT: s_waitcnt vmcnt(5)
-; VI-NEXT: v_lshlrev_b32_e32 v22, 8, v49
+; VI-NEXT: v_lshlrev_b32_e32 v38, 8, v49
; VI-NEXT: s_cbranch_scc0 .LBB111_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_or_b32_sdwa v0, v2, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
@@ -92862,27 +93288,27 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: v_or_b32_sdwa v1, v34, v11 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v0, v10, v9 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v35, v6
; VI-NEXT: v_or_b32_sdwa v2, v6, v5 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v6, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v53, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v16, v15 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_mov_b32_e32 v52, v3
; VI-NEXT: v_mov_b32_e32 v49, v7
; VI-NEXT: v_or_b32_sdwa v3, v8, v7 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v7, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v18, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v55, v46 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v55, v47 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v37, v8
; VI-NEXT: v_or_b32_sdwa v8, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v26, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v22, v56 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v24, v58 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_mov_b32_e32 v40, v9
+; VI-NEXT: v_mov_b32_e32 v41, v9
; VI-NEXT: v_or_b32_sdwa v9, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v30, v27 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v28, v29 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v31, v10
; VI-NEXT: v_or_b32_sdwa v10, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v52, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v51, v60 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v17, v11
; VI-NEXT: v_mov_b32_e32 v19, v13
; VI-NEXT: s_and_b32 s4, s28, 0xff
@@ -92898,23 +93324,22 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: s_lshl_b32 s7, s23, 8
; VI-NEXT: s_lshl_b32 s8, s27, 8
; VI-NEXT: v_mov_b32_e32 v25, v23
-; VI-NEXT: v_mov_b32_e32 v48, v51
-; VI-NEXT: v_mov_b32_e32 v23, v26
-; VI-NEXT: v_mov_b32_e32 v26, v30
+; VI-NEXT: v_mov_b32_e32 v48, v35
+; VI-NEXT: v_mov_b32_e32 v23, v30
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_or_b32_sdwa v0, v34, v59 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v11, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v54, v61 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v41, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v12, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v42, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v1, v43, v33 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v13, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v44, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v45, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v45, v26 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v14, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v0, v47, v51 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; VI-NEXT: v_or_b32_sdwa v1, v57, v22 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v0, v46, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v1, v57, v38 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v0, v32, v50 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_e32 v3, s4, v0
@@ -92955,12 +93380,12 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v42
; VI-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x300, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v41
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v40
; VI-NEXT: v_or_b32_sdwa v12, v62, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v54
; VI-NEXT: v_or_b32_sdwa v3, v61, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v39, vcc, 0x300, v3
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v52
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v51
; VI-NEXT: v_or_b32_sdwa v11, v60, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v30
@@ -92968,18 +93393,18 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: v_add_u32_e32 v30, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v28
; VI-NEXT: v_or_b32_sdwa v10, v29, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v26
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45
; VI-NEXT: v_or_b32_sdwa v3, v27, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v2, v26, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v26, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v24
; VI-NEXT: v_or_b32_sdwa v9, v58, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v23
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v22
; VI-NEXT: v_or_b32_sdwa v3, v56, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_or_b32_sdwa v0, v22, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v55
-; VI-NEXT: v_or_b32_sdwa v8, v46, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v8, v47, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v18
; VI-NEXT: v_or_b32_sdwa v3, v25, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x300, v3
@@ -92991,15 +93416,11 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v36
; VI-NEXT: v_or_b32_sdwa v6, v17, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v31
-; VI-NEXT: v_or_b32_sdwa v3, v40, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_or_b32_sdwa v3, v41, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x300, v3
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v37
; VI-NEXT: v_or_b32_sdwa v5, v49, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v35
-; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; VI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; VI-NEXT: s_add_i32 s28, s28, 3
; VI-NEXT: s_and_b32 s4, s28, 0xff
; VI-NEXT: s_lshl_b32 s5, s29, 8
@@ -93028,13 +93449,12 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: s_and_b32 s10, s16, 0xff
; VI-NEXT: s_lshl_b32 s11, s17, 8
; VI-NEXT: s_or_b32 s10, s11, s10
-; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v47
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v46
; VI-NEXT: s_addk_i32 s6, 0x300
; VI-NEXT: s_addk_i32 s8, 0x300
; VI-NEXT: s_addk_i32 s10, 0x300
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v57
; VI-NEXT: v_or_b32_sdwa v1, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v45
; VI-NEXT: s_addk_i32 s4, 0x300
; VI-NEXT: s_lshl_b32 s5, s5, 16
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -93042,8 +93462,8 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: s_and_b32 s10, s10, 0xffff
; VI-NEXT: s_and_b32 s8, s8, 0xffff
; VI-NEXT: s_and_b32 s6, s6, 0xffff
+; VI-NEXT: v_or_b32_sdwa v0, v38, v0 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: v_add_u32_e32 v1, vcc, 0x300, v1
-; VI-NEXT: v_or_b32_sdwa v2, v38, v2 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; VI-NEXT: s_or_b32 s9, s9, s10
; VI-NEXT: s_or_b32 s7, s7, s8
; VI-NEXT: s_or_b32 s5, s5, s6
@@ -93051,7 +93471,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: s_add_i32 s9, s9, 0x3000000
; VI-NEXT: s_add_i32 s7, s7, 0x3000000
; VI-NEXT: s_add_i32 s5, s5, 0x3000000
-; VI-NEXT: v_or_b32_sdwa v5, v5, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v6, v6, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v7, v7, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v8, v8, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
@@ -93062,25 +93481,31 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: v_or_b32_sdwa v13, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v2, v2, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
; VI-NEXT: v_add_u32_e32 v6, vcc, 0x3000000, v6
; VI-NEXT: v_add_u32_e32 v7, vcc, 0x3000000, v7
; VI-NEXT: v_add_u32_e32 v8, vcc, 0x3000000, v8
; VI-NEXT: v_add_u32_e32 v9, vcc, 0x3000000, v9
; VI-NEXT: v_add_u32_e32 v10, vcc, 0x3000000, v10
; VI-NEXT: v_add_u32_e32 v11, vcc, 0x3000000, v11
-; VI-NEXT: s_waitcnt vmcnt(2)
-; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
-; VI-NEXT: s_waitcnt vmcnt(1)
-; VI-NEXT: v_or_b32_sdwa v4, v4, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
-; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; VI-NEXT: v_add_u32_e32 v12, vcc, 0x3000000, v12
; VI-NEXT: v_add_u32_e32 v13, vcc, 0x3000000, v13
+; VI-NEXT: s_waitcnt vmcnt(0)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x300, v3
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; VI-NEXT: v_or_b32_sdwa v5, v5, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v5, vcc, 0x3000000, v5
; VI-NEXT: v_add_u32_e32 v14, vcc, 0x3000000, v2
; VI-NEXT: v_add_u32_e32 v15, vcc, 0x3000000, v0
; VI-NEXT: v_mov_b32_e32 v0, s9
; VI-NEXT: v_mov_b32_e32 v1, s7
; VI-NEXT: v_mov_b32_e32 v2, s5
+; VI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; VI-NEXT: s_waitcnt vmcnt(1)
+; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
+; VI-NEXT: v_or_b32_sdwa v4, v52, v3 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; VI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: v_or_b32_sdwa v3, v20, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -93112,23 +93537,24 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB111_4:
; VI-NEXT: v_mov_b32_e32 v25, v23
-; VI-NEXT: v_mov_b32_e32 v23, v26
-; VI-NEXT: v_mov_b32_e32 v26, v30
+; VI-NEXT: v_mov_b32_e32 v23, v30
; VI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; VI-NEXT: v_mov_b32_e32 v48, v51
+; VI-NEXT: v_mov_b32_e32 v48, v35
; VI-NEXT: v_mov_b32_e32 v31, v10
; VI-NEXT: v_mov_b32_e32 v36, v34
-; VI-NEXT: v_mov_b32_e32 v35, v6
; VI-NEXT: v_mov_b32_e32 v37, v8
; VI-NEXT: v_mov_b32_e32 v39, v14
; VI-NEXT: v_mov_b32_e32 v21, v15
; VI-NEXT: v_mov_b32_e32 v19, v13
; VI-NEXT: v_mov_b32_e32 v17, v11
-; VI-NEXT: v_mov_b32_e32 v40, v9
+; VI-NEXT: v_mov_b32_e32 v41, v9
; VI-NEXT: v_mov_b32_e32 v49, v7
; VI-NEXT: v_mov_b32_e32 v20, v5
+; VI-NEXT: v_mov_b32_e32 v52, v3
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; VI-NEXT: s_branch .LBB111_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB111_2
+; VI-NEXT: s_branch .LBB111_3
;
; GFX9-LABEL: bitcast_v64i8_to_v32bf16_scalar:
; GFX9: ; %bb.0:
@@ -93155,9 +93581,9 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: buffer_load_ushort v38, off, s[0:3], s32 offset:8
; GFX9-NEXT: buffer_load_ushort v52, off, s[0:3], s32 offset:4
; GFX9-NEXT: buffer_load_ushort v30, off, s[0:3], s32 offset:16
-; GFX9-NEXT: buffer_load_ushort v41, off, s[0:3], s32 offset:12
+; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:12
; GFX9-NEXT: buffer_load_ushort v31, off, s[0:3], s32 offset:24
-; GFX9-NEXT: buffer_load_ushort v40, off, s[0:3], s32 offset:20
+; GFX9-NEXT: buffer_load_ushort v55, off, s[0:3], s32 offset:20
; GFX9-NEXT: buffer_load_ushort v33, off, s[0:3], s32 offset:32
; GFX9-NEXT: buffer_load_ushort v44, off, s[0:3], s32 offset:28
; GFX9-NEXT: buffer_load_ushort v37, off, s[0:3], s32 offset:40
@@ -93176,6 +93602,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: v_lshlrev_b32_e32 v5, 8, v9
; GFX9-NEXT: v_lshlrev_b32_e32 v9, 8, v13
; GFX9-NEXT: v_lshlrev_b32_e32 v13, 8, v17
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshlrev_b32_e32 v1, 8, v1
; GFX9-NEXT: v_lshlrev_b32_e32 v7, 8, v7
; GFX9-NEXT: v_lshlrev_b32_e32 v11, 8, v11
@@ -93192,7 +93619,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: v_lshlrev_b32_e32 v61, 8, v32
; GFX9-NEXT: s_waitcnt vmcnt(17)
; GFX9-NEXT: v_lshlrev_b32_e32 v60, 8, v38
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_and_b64 s[6:7], vcc, exec
; GFX9-NEXT: s_waitcnt vmcnt(15)
; GFX9-NEXT: v_lshlrev_b32_e32 v63, 8, v30
; GFX9-NEXT: s_waitcnt vmcnt(13)
@@ -93247,14 +93674,13 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v0, v18, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_or_b32_sdwa v1, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: v_mov_b32_e32 v32, v16
; GFX9-NEXT: v_lshl_or_b32 v8, v1, 16, v0
-; GFX9-NEXT: v_mov_b32_e32 v16, v22
-; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; GFX9-NEXT: v_mov_b32_e32 v37, v24
; GFX9-NEXT: v_or_b32_sdwa v1, v24, v47 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
-; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; GFX9-NEXT: v_mov_b32_e32 v27, v25
+; GFX9-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; GFX9-NEXT: v_or_b32_sdwa v0, v22, v58 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_mov_b32_e32 v17, v9
; GFX9-NEXT: v_lshl_or_b32 v9, v1, 16, v0
@@ -93271,12 +93697,12 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: s_or_b32 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s18, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s19, 8
-; GFX9-NEXT: v_mov_b32_e32 v55, v11
+; GFX9-NEXT: v_mov_b32_e32 v41, v11
; GFX9-NEXT: v_lshl_or_b32 v11, v1, 16, v0
-; GFX9-NEXT: v_or_b32_sdwa v0, v41, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v40, v63 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_or_b32 s5, s5, s6
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_or_b32_sdwa v1, v40, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v55, v62 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s4, s5
; GFX9-NEXT: s_and_b32 s5, s20, 0xff
; GFX9-NEXT: s_lshl_b32 s6, s21, 8
@@ -93307,25 +93733,26 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s7
; GFX9-NEXT: v_lshl_or_b32 v4, v4, 16, v2
; GFX9-NEXT: v_mov_b32_e32 v42, v15
-; GFX9-NEXT: v_mov_b32_e32 v27, v25
+; GFX9-NEXT: v_mov_b32_e32 v32, v16
; GFX9-NEXT: v_mov_b32_e32 v30, v18
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v49, v20
+; GFX9-NEXT: v_mov_b32_e32 v16, v22
; GFX9-NEXT: v_mov_b32_e32 v39, v26
; GFX9-NEXT: v_mov_b32_e32 v35, v28
; GFX9-NEXT: v_mov_b32_e32 v54, v31
; GFX9-NEXT: v_mov_b32_e32 v31, v51
; GFX9-NEXT: v_mov_b32_e32 v2, s6
; GFX9-NEXT: s_waitcnt vmcnt(1)
-; GFX9-NEXT: v_or_b32_sdwa v0, v57, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v0, v57, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_mov_b32_e32 v18, v22
; GFX9-NEXT: s_waitcnt vmcnt(0)
-; GFX9-NEXT: v_or_b32_sdwa v1, v56, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
+; GFX9-NEXT: v_or_b32_sdwa v1, v56, v25 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:BYTE_0 src1_sel:DWORD
; GFX9-NEXT: v_lshl_or_b32 v15, v1, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
-; GFX9-NEXT: v_mov_b32_e32 v20, v24
+; GFX9-NEXT: v_mov_b32_e32 v18, v24
+; GFX9-NEXT: v_mov_b32_e32 v20, v25
; GFX9-NEXT: s_cbranch_execnz .LBB111_3
; GFX9-NEXT: .LBB111_2: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v3, 3, v45
@@ -93337,10 +93764,10 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v3, 3, v43
; GFX9-NEXT: v_or_b32_sdwa v3, v36, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v15, 0x300, v3
-; GFX9-NEXT: v_add_u32_e32 v3, 3, v41
+; GFX9-NEXT: v_add_u32_e32 v3, 3, v40
; GFX9-NEXT: v_or_b32_sdwa v3, v63, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v12, 0x300, v3
-; GFX9-NEXT: v_add_u32_e32 v3, 3, v40
+; GFX9-NEXT: v_add_u32_e32 v3, 3, v55
; GFX9-NEXT: v_or_b32_sdwa v3, v62, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v36, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v34
@@ -93374,7 +93801,7 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: v_or_b32_sdwa v3, v19, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v23, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v48
-; GFX9-NEXT: v_or_b32_sdwa v3, v55, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
+; GFX9-NEXT: v_or_b32_sdwa v3, v41, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
; GFX9-NEXT: v_add_u32_e32 v6, 0x300, v3
; GFX9-NEXT: v_add_u32_e32 v3, 3, v33
; GFX9-NEXT: v_or_b32_sdwa v3, v17, v3 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:BYTE_0
@@ -93528,11 +93955,13 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v23, v21
; GFX9-NEXT: v_mov_b32_e32 v42, v15
; GFX9-NEXT: v_mov_b32_e32 v19, v13
-; GFX9-NEXT: v_mov_b32_e32 v55, v11
+; GFX9-NEXT: v_mov_b32_e32 v41, v11
; GFX9-NEXT: v_mov_b32_e32 v17, v9
; GFX9-NEXT: v_mov_b32_e32 v50, v3
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX9-NEXT: s_branch .LBB111_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB111_2
+; GFX9-NEXT: s_branch .LBB111_3
;
; GFX11-TRUE16-LABEL: bitcast_v64i8_to_v32bf16_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -93573,7 +94002,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v25, 8, v29
-; GFX11-TRUE16-NEXT: s_mov_b32 s4, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v85, 8, v0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(13)
@@ -93592,45 +94020,46 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v81, 8, v14
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-TRUE16-NEXT: v_lshlrev_b32_e32 v84, 8, v84
-; GFX11-TRUE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s4, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB111_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-TRUE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-TRUE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-TRUE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-TRUE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-TRUE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-TRUE16-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-TRUE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-TRUE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-TRUE16-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-TRUE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-TRUE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-TRUE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-TRUE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-TRUE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-TRUE16-NEXT: v_and_b32_e64 v1, 0xffff, s9
+; GFX11-TRUE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-TRUE16-NEXT: s_or_b32 s9, s9, s12
+; GFX11-TRUE16-NEXT: s_or_b32 s8, s8, s11
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xff, v31
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xff, v37
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xff, v33
@@ -93698,10 +94127,9 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB111_3
; GFX11-TRUE16-NEXT: .LBB111_2: ; %cmp.true
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-TRUE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
@@ -93877,7 +94305,9 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB111_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-TRUE16-NEXT: s_branch .LBB111_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB111_2
+; GFX11-TRUE16-NEXT: s_branch .LBB111_3
;
; GFX11-FAKE16-LABEL: bitcast_v64i8_to_v32bf16_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -93918,7 +94348,6 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v21, 8, v25
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v27, 8, v27
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v25, 8, v29
-; GFX11-FAKE16-NEXT: s_mov_b32 s4, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(15)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v85, 8, v0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(13)
@@ -93937,45 +94366,46 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v81, 8, v14
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(6)
; GFX11-FAKE16-NEXT: v_lshlrev_b32_e32 v84, 8, v84
-; GFX11-FAKE16-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s4, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB111_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-FAKE16-NEXT: s_and_b32 s5, s0, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s6, s1, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s3, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_or_b32 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s5, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s7, s8
-; GFX11-FAKE16-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s8, s21, 8
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s22, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s23, 8
-; GFX11-FAKE16-NEXT: s_or_b32 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s24, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s25, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s4, s0, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s5, s1, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s3, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_or_b32 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s4, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s6, s7
+; GFX11-FAKE16-NEXT: s_and_b32 s6, s20, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s7, s21, 8
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s22, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s23, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s24, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s9, s25, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v35
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
-; GFX11-FAKE16-NEXT: s_or_b32 s8, s9, s10
-; GFX11-FAKE16-NEXT: s_and_b32 s10, s28, 0xff
-; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s29, 8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s6, s7
+; GFX11-FAKE16-NEXT: s_or_b32 s7, s8, s9
+; GFX11-FAKE16-NEXT: s_and_b32 s9, s28, 0xff
+; GFX11-FAKE16-NEXT: s_lshl_b32 s10, s29, 8
; GFX11-FAKE16-NEXT: v_or_b32_e32 v0, v0, v39
-; GFX11-FAKE16-NEXT: s_or_b32 s10, s10, s11
-; GFX11-FAKE16-NEXT: s_and_b32 s9, s26, 0xff
-; GFX11-FAKE16-NEXT: v_and_b32_e64 v1, 0xffff, s10
-; GFX11-FAKE16-NEXT: s_lshl_b32 s12, s27, 8
+; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s10
+; GFX11-FAKE16-NEXT: s_and_b32 s8, s26, 0xff
+; GFX11-FAKE16-NEXT: v_and_b32_e64 v1, 0xffff, s9
+; GFX11-FAKE16-NEXT: s_lshl_b32 s11, s27, 8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xff, v38
-; GFX11-FAKE16-NEXT: s_or_b32 s9, s9, s12
+; GFX11-FAKE16-NEXT: s_or_b32 s8, s8, s11
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xff, v31
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s8, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s7, s8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xff, v37
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v4, v0, 16, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xff, v33
@@ -94043,10 +94473,9 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v13, v15, 16, v87
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v14, v96, 16, v97
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v15, v86, 16, v98
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s5 :: v_dual_mov_b32 v1, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v3, s8
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s6 :: v_dual_mov_b32 v3, s7
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB111_3
; GFX11-FAKE16-NEXT: .LBB111_2: ; %cmp.true
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(1)
; GFX11-FAKE16-NEXT: v_add_nc_u32_e32 v4, 3, v68
@@ -94222,7 +94651,9 @@ define inreg <32 x bfloat> @bitcast_v64i8_to_v32bf16_scalar(<64 x i8> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB111_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15
-; GFX11-FAKE16-NEXT: s_branch .LBB111_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB111_2
+; GFX11-FAKE16-NEXT: s_branch .LBB111_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
index e66762f..778ebb1 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.576bit.ll
@@ -152,8 +152,9 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; SI-LABEL: bitcast_v18i32_to_v18f32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -171,12 +172,15 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15
@@ -195,16 +199,15 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v18i32_to_v18f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -222,12 +225,15 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
@@ -246,16 +252,15 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v18i32_to_v18f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -273,12 +278,15 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
@@ -297,22 +305,23 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v18i32_to_v18f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s29, s29, 3
; GFX11-NEXT: s_add_i32 s28, s28, 3
; GFX11-NEXT: s_add_i32 s27, s27, 3
@@ -331,7 +340,7 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -343,8 +352,6 @@ define inreg <18 x float> @bitcast_v18i32_to_v18f32_scalar(<18 x i32> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -499,8 +506,9 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; SI-LABEL: bitcast_v18f32_to_v18i32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -518,12 +526,15 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v17, 1.0, v17
; SI-NEXT: v_add_f32_e32 v16, 1.0, v16
; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -542,16 +553,15 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v18f32_to_v18i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -569,12 +579,15 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -593,16 +606,15 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v18f32_to_v18i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -620,12 +632,15 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -644,10 +659,8 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v18f32_to_v18i32_scalar:
; GFX11: ; %bb.0:
@@ -657,23 +670,23 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v32, s36, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: s_mov_b32 s47, s23
; GFX11-NEXT: s_mov_b32 s46, s22
-; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: v_writelane_b32 v32, s37, 1
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: s_mov_b32 s44, s20
; GFX11-NEXT: s_mov_b32 s43, s19
+; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s42, s18
; GFX11-NEXT: s_mov_b32 s41, s17
-; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s40, s16
; GFX11-NEXT: s_mov_b32 s38, s2
-; GFX11-NEXT: s_mov_b32 s37, s1
-; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: v_writelane_b32 v32, s39, 3
; GFX11-NEXT: s_mov_b32 s39, s3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s37, s1
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: v_writelane_b32 v32, s48, 4
; GFX11-NEXT: s_mov_b32 s48, s24
; GFX11-NEXT: v_writelane_b32 v32, s49, 5
@@ -686,11 +699,14 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s52, 1.0
; GFX11-NEXT: v_add_f32_e64 v15, s51, 1.0
@@ -710,8 +726,6 @@ define inreg <18 x i32> @bitcast_v18f32_to_v18i32_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
; GFX11-NEXT: s_branch .LBB3_5
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
@@ -908,8 +922,9 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; SI-LABEL: bitcast_v18i32_to_v9i64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -927,12 +942,15 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15
@@ -951,16 +969,15 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v18i32_to_v9i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -978,12 +995,15 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
@@ -1002,16 +1022,15 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v18i32_to_v9i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -1029,12 +1048,15 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
@@ -1053,22 +1075,23 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v18i32_to_v9i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s29, s29, 3
; GFX11-NEXT: s_add_i32 s28, s28, 3
; GFX11-NEXT: s_add_i32 s27, s27, 3
@@ -1087,7 +1110,7 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1099,8 +1122,6 @@ define inreg <9 x i64> @bitcast_v18i32_to_v9i64_scalar(<18 x i32> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1269,8 +1290,9 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; SI-LABEL: bitcast_v9i64_to_v18i32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -1288,12 +1310,15 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
; SI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
@@ -1312,16 +1337,15 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v9i64_to_v18i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -1339,12 +1363,15 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
@@ -1363,16 +1390,15 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v9i64_to_v18i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -1390,12 +1416,15 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
@@ -1414,22 +1443,23 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v9i64_to_v18i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s28, s28, 3
; GFX11-NEXT: s_addc_u32 s29, s29, 0
; GFX11-NEXT: s_add_u32 s26, s26, 3
@@ -1448,7 +1478,7 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB7_3: ; %end
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1460,8 +1490,6 @@ define inreg <18 x i32> @bitcast_v9i64_to_v18i32_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1625,8 +1653,9 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; SI-LABEL: bitcast_v18i32_to_v9f64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -1644,12 +1673,15 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
; SI-NEXT: v_add_i32_e32 v15, vcc, 3, v15
@@ -1668,16 +1700,15 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v18i32_to_v9f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -1695,12 +1726,15 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
@@ -1719,16 +1753,15 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v18i32_to_v9f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -1746,12 +1779,15 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
; GFX9-NEXT: v_add_u32_e32 v16, 3, v16
; GFX9-NEXT: v_add_u32_e32 v15, 3, v15
@@ -1770,22 +1806,23 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v18i32_to_v9f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s29, s29, 3
; GFX11-NEXT: s_add_i32 s28, s28, 3
; GFX11-NEXT: s_add_i32 s27, s27, 3
@@ -1804,7 +1841,7 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -1816,8 +1853,6 @@ define inreg <9 x double> @bitcast_v18i32_to_v9f64_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1945,8 +1980,9 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; SI-LABEL: bitcast_v9f64_to_v18i32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -1964,12 +2000,15 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
@@ -1979,16 +2018,15 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v9f64_to_v18i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -2006,12 +2044,15 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
@@ -2021,16 +2062,15 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v9f64_to_v18i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -2048,12 +2088,15 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
@@ -2063,10 +2106,8 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v9f64_to_v18i32_scalar:
; GFX11: ; %bb.0:
@@ -2076,23 +2117,23 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v32, s36, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: s_mov_b32 s47, s23
; GFX11-NEXT: s_mov_b32 s46, s22
-; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: v_writelane_b32 v32, s37, 1
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: s_mov_b32 s44, s20
; GFX11-NEXT: s_mov_b32 s43, s19
+; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s42, s18
; GFX11-NEXT: s_mov_b32 s41, s17
-; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s40, s16
; GFX11-NEXT: s_mov_b32 s38, s2
-; GFX11-NEXT: s_mov_b32 s37, s1
-; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: v_writelane_b32 v32, s39, 3
; GFX11-NEXT: s_mov_b32 s39, s3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s37, s1
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: v_writelane_b32 v32, s48, 4
; GFX11-NEXT: s_mov_b32 s48, s24
; GFX11-NEXT: v_writelane_b32 v32, s49, 5
@@ -2105,11 +2146,14 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[48:49], 1.0
@@ -2120,8 +2164,6 @@ define inreg <18 x i32> @bitcast_v9f64_to_v18i32_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: s_branch .LBB11_5
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
@@ -2760,11 +2802,12 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: v_readfirstlane_b32 s8, v2
; SI-NEXT: v_readfirstlane_b32 s7, v3
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v4
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -2970,21 +3013,24 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v18i32_to_v36i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s9, v0
; VI-NEXT: v_readfirstlane_b32 s8, v1
-; VI-NEXT: v_readfirstlane_b32 s6, v2
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v3
+; VI-NEXT: v_readfirstlane_b32 s7, v2
+; VI-NEXT: v_readfirstlane_b32 s6, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -3003,8 +3049,8 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; VI-NEXT: s_lshr_b32 s59, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s29, s29, 3
@@ -3021,8 +3067,8 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -3085,15 +3131,15 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s12, s12, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s11, s11, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_or_b32 s14, s26, s14
; VI-NEXT: s_or_b32 s9, s9, s13
; VI-NEXT: s_or_b32 s8, s8, s12
-; VI-NEXT: s_or_b32 s6, s6, s11
-; VI-NEXT: s_or_b32 s7, s7, s10
+; VI-NEXT: s_or_b32 s7, s7, s11
+; VI-NEXT: s_or_b32 s6, s6, s10
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -3110,8 +3156,8 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v13, s14
; VI-NEXT: v_mov_b32_e32 v14, s9
; VI-NEXT: v_mov_b32_e32 v15, s8
-; VI-NEXT: v_mov_b32_e32 v16, s6
-; VI-NEXT: v_mov_b32_e32 v17, s7
+; VI-NEXT: v_mov_b32_e32 v16, s7
+; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_4:
; VI-NEXT: ; implicit-def: $sgpr59
@@ -3132,23 +3178,26 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr12
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v18i32_to_v36i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s6, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -3165,10 +3214,10 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -3183,10 +3232,10 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -3216,10 +3265,10 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s40
; GFX9-NEXT: s_pack_ll_b32_b16 s15, s28, s15
; GFX9-NEXT: s_pack_ll_b32_b16 s14, s29, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s12
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s11
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -3234,10 +3283,10 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s15
; GFX9-NEXT: v_mov_b32_e32 v13, s14
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: ; implicit-def: $sgpr59
@@ -3258,13 +3307,15 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr12
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v18i32_to_v36i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s46, 0
+; GFX11-NEXT: s_mov_b32 s46, -1
; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -3286,8 +3337,7 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s43, s2, 16
; GFX11-NEXT: s_lshr_b32 s44, s1, 16
; GFX11-NEXT: s_lshr_b32 s45, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s29, s29, 3
; GFX11-NEXT: s_add_i32 s28, s28, 3
@@ -3374,7 +3424,9 @@ define inreg <36 x i16> @bitcast_v18i32_to_v36i16_scalar(<18 x i32> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4124,6 +4176,7 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
@@ -4141,7 +4194,7 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v48, v4
; SI-NEXT: v_mov_b32_e32 v49, v2
; SI-NEXT: v_mov_b32_e32 v50, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v3
@@ -4158,46 +4211,46 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v7, v0, v45
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_or_b32_e32 v8, v0, v44
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v9, v0, v43
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v10, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v10, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v11, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v12, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v13, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v14, v0, v54
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v54
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v15, v0, v53
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v53
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v16, v0, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v44
; SI-NEXT: v_or_b32_e32 v17, v0, v51
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -4212,10 +4265,6 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v44, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v43, v0
@@ -4271,13 +4320,16 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v52, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -4286,6 +4338,7 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -4305,7 +4358,9 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v36i16_to_v18i32_scalar:
; VI: ; %bb.0:
@@ -4325,11 +4380,12 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB15_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -4504,15 +4560,13 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v36i16_to_v18i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -4528,11 +4582,15 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -4547,6 +4605,7 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB15_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -4574,15 +4633,17 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v32
; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
-; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v36, 16, v17
; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v37, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v36, 16, v0
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -4597,20 +4658,20 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB15_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-LABEL: bitcast_v36i16_to_v18i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -4623,15 +4684,14 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -4644,14 +4704,18 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -4671,8 +4735,6 @@ define inreg <18 x i32> @bitcast_v36i16_to_v18i32_scalar(<36 x i16> inreg %a, i3
; GFX11-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5441,20 +5503,21 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; SI-NEXT: v_readfirstlane_b32 s8, v1
-; SI-NEXT: v_readfirstlane_b32 s7, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v3
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v4
+; SI-NEXT: v_readfirstlane_b32 s9, v1
+; SI-NEXT: v_readfirstlane_b32 s8, v2
+; SI-NEXT: v_readfirstlane_b32 s7, v3
+; SI-NEXT: v_readfirstlane_b32 s6, v4
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s29, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -5484,10 +5547,10 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v34, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v36, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s29
; SI-NEXT: v_cvt_f32_f16_e32 v13, s28
; SI-NEXT: v_cvt_f32_f16_e32 v14, s27
@@ -5518,10 +5581,10 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s27, s27, 3
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: s_add_i32 s29, s29, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s10, s18, 16
@@ -5536,14 +5599,14 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s43, s27, 16
; SI-NEXT: s_lshr_b32 s44, s28, 16
; SI-NEXT: s_lshr_b32 s45, s29, 16
-; SI-NEXT: s_lshr_b32 s46, s8, 16
-; SI-NEXT: s_lshr_b32 s47, s7, 16
-; SI-NEXT: s_lshr_b32 s56, s6, 16
-; SI-NEXT: s_lshr_b32 s57, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: s_lshr_b32 s46, s9, 16
+; SI-NEXT: s_lshr_b32 s47, s8, 16
+; SI-NEXT: s_lshr_b32 s56, s7, 16
+; SI-NEXT: s_lshr_b32 s57, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s29
; SI-NEXT: v_cvt_f32_f16_e32 v13, s28
; SI-NEXT: v_cvt_f32_f16_e32 v14, s27
@@ -5740,21 +5803,24 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v18i32_to_v36f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s9, v0
; VI-NEXT: v_readfirstlane_b32 s8, v1
-; VI-NEXT: v_readfirstlane_b32 s6, v2
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v3
+; VI-NEXT: v_readfirstlane_b32 s7, v2
+; VI-NEXT: v_readfirstlane_b32 s6, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB17_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -5773,8 +5839,8 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; VI-NEXT: s_lshr_b32 s59, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s29, s29, 3
@@ -5791,8 +5857,8 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -5855,15 +5921,15 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s12, s12, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s11, s11, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_or_b32 s14, s26, s14
; VI-NEXT: s_or_b32 s9, s9, s13
; VI-NEXT: s_or_b32 s8, s8, s12
-; VI-NEXT: s_or_b32 s6, s6, s11
-; VI-NEXT: s_or_b32 s7, s7, s10
+; VI-NEXT: s_or_b32 s7, s7, s11
+; VI-NEXT: s_or_b32 s6, s6, s10
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -5880,8 +5946,8 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v13, s14
; VI-NEXT: v_mov_b32_e32 v14, s9
; VI-NEXT: v_mov_b32_e32 v15, s8
-; VI-NEXT: v_mov_b32_e32 v16, s6
-; VI-NEXT: v_mov_b32_e32 v17, s7
+; VI-NEXT: v_mov_b32_e32 v16, s7
+; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_4:
; VI-NEXT: ; implicit-def: $sgpr59
@@ -5902,23 +5968,26 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr12
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB17_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB17_2
+; VI-NEXT: s_branch .LBB17_3
;
; GFX9-LABEL: bitcast_v18i32_to_v36f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s6, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -5935,10 +6004,10 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -5953,10 +6022,10 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -5986,10 +6055,10 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s40
; GFX9-NEXT: s_pack_ll_b32_b16 s15, s28, s15
; GFX9-NEXT: s_pack_ll_b32_b16 s14, s29, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s12
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s11
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -6004,10 +6073,10 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s15
; GFX9-NEXT: v_mov_b32_e32 v13, s14
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: ; implicit-def: $sgpr59
@@ -6028,13 +6097,15 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr12
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB17_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB17_2
+; GFX9-NEXT: s_branch .LBB17_3
;
; GFX11-LABEL: bitcast_v18i32_to_v36f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s46, 0
+; GFX11-NEXT: s_mov_b32 s46, -1
; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -6056,8 +6127,7 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s43, s2, 16
; GFX11-NEXT: s_lshr_b32 s44, s1, 16
; GFX11-NEXT: s_lshr_b32 s45, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
+; GFX11-NEXT: s_cbranch_execnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s29, s29, 3
; GFX11-NEXT: s_add_i32 s28, s28, 3
@@ -6144,7 +6214,9 @@ define inreg <36 x half> @bitcast_v18i32_to_v36f16_scalar(<18 x i32> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB17_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB17_2
+; GFX11-NEXT: s_branch .LBB17_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7044,6 +7116,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v58, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -7304,7 +7377,9 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v44, v32
; SI-NEXT: v_mov_b32_e32 v25, v48
; SI-NEXT: v_mov_b32_e32 v48, v43
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v36f16_to_v18i32_scalar:
; VI: ; %bb.0:
@@ -7324,11 +7399,12 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB19_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -7472,15 +7548,13 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB19_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB19_2
+; VI-NEXT: s_branch .LBB19_3
;
; GFX9-LABEL: bitcast_v36f16_to_v18i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -7496,11 +7570,15 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -7515,6 +7593,7 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -7573,14 +7652,16 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB19_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB19_2
+; GFX9-NEXT: s_branch .LBB19_3
;
; GFX11-LABEL: bitcast_v36f16_to_v18i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -7593,15 +7674,14 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -7614,14 +7694,18 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -7641,8 +7725,6 @@ define inreg <18 x i32> @bitcast_v36f16_to_v18i32_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -7815,8 +7897,9 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; SI-LABEL: bitcast_v18f32_to_v9i64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -7834,12 +7917,15 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v17, 1.0, v17
; SI-NEXT: v_add_f32_e32 v16, 1.0, v16
; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -7858,16 +7944,15 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v18f32_to_v9i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -7885,12 +7970,15 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -7909,16 +7997,15 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v18f32_to_v9i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -7936,12 +8023,15 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -7960,10 +8050,8 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v18f32_to_v9i64_scalar:
; GFX11: ; %bb.0:
@@ -7973,23 +8061,23 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v32, s36, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: s_mov_b32 s47, s23
; GFX11-NEXT: s_mov_b32 s46, s22
-; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: v_writelane_b32 v32, s37, 1
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: s_mov_b32 s44, s20
; GFX11-NEXT: s_mov_b32 s43, s19
+; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s42, s18
; GFX11-NEXT: s_mov_b32 s41, s17
-; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s40, s16
; GFX11-NEXT: s_mov_b32 s38, s2
-; GFX11-NEXT: s_mov_b32 s37, s1
-; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: v_writelane_b32 v32, s39, 3
; GFX11-NEXT: s_mov_b32 s39, s3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s37, s1
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: v_writelane_b32 v32, s48, 4
; GFX11-NEXT: s_mov_b32 s48, s24
; GFX11-NEXT: v_writelane_b32 v32, s49, 5
@@ -8002,11 +8090,14 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s52, 1.0
; GFX11-NEXT: v_add_f32_e64 v15, s51, 1.0
@@ -8026,8 +8117,6 @@ define inreg <9 x i64> @bitcast_v18f32_to_v9i64_scalar(<18 x float> inreg %a, i3
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
; GFX11-NEXT: s_branch .LBB21_5
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: s_branch .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
@@ -8229,8 +8318,9 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; SI-LABEL: bitcast_v9i64_to_v18f32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -8248,12 +8338,15 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
; SI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
; SI-NEXT: v_add_i32_e32 v14, vcc, 3, v14
@@ -8272,16 +8365,15 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v9i64_to_v18f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -8299,12 +8391,15 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
@@ -8323,16 +8418,15 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v9i64_to_v18f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -8350,12 +8444,15 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
; GFX9-NEXT: v_add_co_u32_e32 v14, vcc, 3, v14
@@ -8374,22 +8471,23 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v9i64_to_v18f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s28, s28, 3
; GFX11-NEXT: s_addc_u32 s29, s29, 0
; GFX11-NEXT: s_add_u32 s26, s26, 3
@@ -8408,7 +8506,7 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB23_3: ; %end
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -8420,8 +8518,6 @@ define inreg <18 x float> @bitcast_v9i64_to_v18f32_scalar(<9 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_4:
-; GFX11-NEXT: s_branch .LBB23_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8576,8 +8672,9 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; SI-LABEL: bitcast_v18f32_to_v9f64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -8595,12 +8692,15 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_3
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v17, 1.0, v17
; SI-NEXT: v_add_f32_e32 v16, 1.0, v16
; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -8619,16 +8719,15 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB25_3: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v18f32_to_v9f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -8646,12 +8745,15 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -8670,16 +8772,15 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v18f32_to_v9f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -8697,12 +8798,15 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
@@ -8721,10 +8825,8 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v18f32_to_v9f64_scalar:
; GFX11: ; %bb.0:
@@ -8734,23 +8836,23 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v32, s36, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: s_mov_b32 s47, s23
; GFX11-NEXT: s_mov_b32 s46, s22
-; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: v_writelane_b32 v32, s37, 1
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: s_mov_b32 s44, s20
; GFX11-NEXT: s_mov_b32 s43, s19
+; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s42, s18
; GFX11-NEXT: s_mov_b32 s41, s17
-; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s40, s16
; GFX11-NEXT: s_mov_b32 s38, s2
-; GFX11-NEXT: s_mov_b32 s37, s1
-; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: v_writelane_b32 v32, s39, 3
; GFX11-NEXT: s_mov_b32 s39, s3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s37, s1
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: v_writelane_b32 v32, s48, 4
; GFX11-NEXT: s_mov_b32 s48, s24
; GFX11-NEXT: v_writelane_b32 v32, s49, 5
@@ -8763,11 +8865,14 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
-; GFX11-NEXT: .LBB25_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v17, s53, 1.0
; GFX11-NEXT: v_add_f32_e64 v16, s52, 1.0
; GFX11-NEXT: v_add_f32_e64 v15, s51, 1.0
@@ -8787,8 +8892,6 @@ define inreg <9 x double> @bitcast_v18f32_to_v9f64_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_add_f32_e64 v1, s37, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s36, 1.0
; GFX11-NEXT: s_branch .LBB25_5
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
@@ -8949,8 +9052,9 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; SI-LABEL: bitcast_v9f64_to_v18f32_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -8968,12 +9072,15 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
@@ -8983,16 +9090,15 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v9f64_to_v18f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -9010,12 +9116,15 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
@@ -9025,16 +9134,15 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v9f64_to_v18f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -9052,12 +9160,15 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
@@ -9067,10 +9178,8 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v9f64_to_v18f32_scalar:
; GFX11: ; %bb.0:
@@ -9080,23 +9189,23 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v32, s36, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: s_mov_b32 s47, s23
; GFX11-NEXT: s_mov_b32 s46, s22
-; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: v_writelane_b32 v32, s37, 1
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: s_mov_b32 s44, s20
; GFX11-NEXT: s_mov_b32 s43, s19
+; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s42, s18
; GFX11-NEXT: s_mov_b32 s41, s17
-; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s40, s16
; GFX11-NEXT: s_mov_b32 s38, s2
-; GFX11-NEXT: s_mov_b32 s37, s1
-; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: v_writelane_b32 v32, s39, 3
; GFX11-NEXT: s_mov_b32 s39, s3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s37, s1
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: v_writelane_b32 v32, s48, 4
; GFX11-NEXT: s_mov_b32 s48, s24
; GFX11-NEXT: v_writelane_b32 v32, s49, 5
@@ -9109,11 +9218,14 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; GFX11-NEXT: v_add_f64 v[12:13], s[48:49], 1.0
@@ -9124,8 +9236,6 @@ define inreg <18 x float> @bitcast_v9f64_to_v18f32_scalar(<9 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: s_branch .LBB27_5
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
@@ -9746,78 +9856,79 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, s16
; SI-NEXT: v_mov_b32_e32 v18, s17
-; SI-NEXT: v_mov_b32_e32 v16, s18
-; SI-NEXT: v_mov_b32_e32 v15, s19
-; SI-NEXT: v_mov_b32_e32 v14, s20
-; SI-NEXT: v_mov_b32_e32 v13, s21
-; SI-NEXT: v_mov_b32_e32 v12, s22
-; SI-NEXT: v_mov_b32_e32 v11, s23
-; SI-NEXT: v_mov_b32_e32 v10, s24
-; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: v_mov_b32_e32 v8, s26
-; SI-NEXT: v_mov_b32_e32 v7, s27
-; SI-NEXT: v_mov_b32_e32 v6, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_mov_b32_e32 v5, s29
+; SI-NEXT: v_mov_b32_e32 v17, s18
+; SI-NEXT: v_mov_b32_e32 v16, s19
+; SI-NEXT: v_mov_b32_e32 v15, s20
+; SI-NEXT: v_mov_b32_e32 v14, s21
+; SI-NEXT: v_mov_b32_e32 v13, s22
+; SI-NEXT: v_mov_b32_e32 v12, s23
+; SI-NEXT: v_mov_b32_e32 v11, s24
+; SI-NEXT: v_mov_b32_e32 v10, s25
+; SI-NEXT: v_mov_b32_e32 v9, s26
+; SI-NEXT: v_mov_b32_e32 v8, s27
+; SI-NEXT: v_mov_b32_e32 v7, s28
+; SI-NEXT: v_mov_b32_e32 v6, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB29_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: v_alignbit_b32 v17, v4, v3, 16
+; SI-NEXT: v_alignbit_b32 v5, v4, v3, 16
; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16
-; SI-NEXT: v_alignbit_b32 v21, v5, v6, 16
-; SI-NEXT: v_alignbit_b32 v22, v7, v8, 16
-; SI-NEXT: v_alignbit_b32 v25, v9, v10, 16
-; SI-NEXT: v_alignbit_b32 v27, v11, v12, 16
-; SI-NEXT: v_alignbit_b32 v29, v13, v14, 16
-; SI-NEXT: v_alignbit_b32 v31, v15, v16, 16
+; SI-NEXT: v_alignbit_b32 v21, v6, v7, 16
+; SI-NEXT: v_alignbit_b32 v22, v8, v9, 16
+; SI-NEXT: v_alignbit_b32 v25, v10, v11, 16
+; SI-NEXT: v_alignbit_b32 v27, v12, v13, 16
+; SI-NEXT: v_alignbit_b32 v29, v14, v15, 16
+; SI-NEXT: v_alignbit_b32 v31, v16, v17, 16
; SI-NEXT: v_alignbit_b32 v33, v18, v19, 16
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2
-; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
-; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v6
+; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v16
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18
; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v18, 1.0, v18
; SI-NEXT: v_add_f32_e32 v19, 1.0, v19
-; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
; SI-NEXT: v_add_f32_e32 v16, 1.0, v16
-; SI-NEXT: v_add_f32_e32 v13, 1.0, v13
+; SI-NEXT: v_add_f32_e32 v17, 1.0, v17
; SI-NEXT: v_add_f32_e32 v14, 1.0, v14
-; SI-NEXT: v_add_f32_e32 v11, 1.0, v11
+; SI-NEXT: v_add_f32_e32 v15, 1.0, v15
; SI-NEXT: v_add_f32_e32 v12, 1.0, v12
-; SI-NEXT: v_add_f32_e32 v9, 1.0, v9
+; SI-NEXT: v_add_f32_e32 v13, 1.0, v13
; SI-NEXT: v_add_f32_e32 v10, 1.0, v10
-; SI-NEXT: v_add_f32_e32 v7, 1.0, v7
+; SI-NEXT: v_add_f32_e32 v11, 1.0, v11
; SI-NEXT: v_add_f32_e32 v8, 1.0, v8
-; SI-NEXT: v_add_f32_e32 v5, 1.0, v5
+; SI-NEXT: v_add_f32_e32 v9, 1.0, v9
; SI-NEXT: v_add_f32_e32 v6, 1.0, v6
+; SI-NEXT: v_add_f32_e32 v7, 1.0, v7
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v4, 1.0, v4
; SI-NEXT: v_add_f32_e32 v3, 1.0, v3
-; SI-NEXT: v_alignbit_b32 v17, v4, v3, 16
+; SI-NEXT: v_alignbit_b32 v5, v4, v3, 16
; SI-NEXT: v_alignbit_b32 v20, v2, v1, 16
-; SI-NEXT: v_alignbit_b32 v21, v5, v6, 16
-; SI-NEXT: v_alignbit_b32 v22, v7, v8, 16
-; SI-NEXT: v_alignbit_b32 v25, v9, v10, 16
-; SI-NEXT: v_alignbit_b32 v27, v11, v12, 16
-; SI-NEXT: v_alignbit_b32 v29, v13, v14, 16
-; SI-NEXT: v_alignbit_b32 v31, v15, v16, 16
+; SI-NEXT: v_alignbit_b32 v21, v6, v7, 16
+; SI-NEXT: v_alignbit_b32 v22, v8, v9, 16
+; SI-NEXT: v_alignbit_b32 v25, v10, v11, 16
+; SI-NEXT: v_alignbit_b32 v27, v12, v13, 16
+; SI-NEXT: v_alignbit_b32 v29, v14, v15, 16
+; SI-NEXT: v_alignbit_b32 v31, v16, v17, 16
; SI-NEXT: v_alignbit_b32 v33, v18, v19, 16
; SI-NEXT: v_lshrrev_b32_e32 v23, 16, v4
; SI-NEXT: v_lshrrev_b32_e32 v24, 16, v2
-; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
-; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v7
-; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v9
-; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v11
-; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v13
-; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; SI-NEXT: v_lshrrev_b32_e32 v26, 16, v6
+; SI-NEXT: v_lshrrev_b32_e32 v28, 16, v8
+; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v10
+; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v12
+; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v14
+; SI-NEXT: v_lshrrev_b32_e32 v35, 16, v16
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v18
; SI-NEXT: .LBB29_3: ; %end
; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19
@@ -9830,84 +9941,84 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_or_b32_e32 v18, v18, v19
; SI-NEXT: v_add_i32_e32 v19, vcc, 4, v0
; SI-NEXT: buffer_store_dword v18, v19, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; SI-NEXT: v_and_b32_e32 v17, 0xffff, v17
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v31
-; SI-NEXT: v_or_b32_e32 v16, v16, v18
+; SI-NEXT: v_or_b32_e32 v17, v17, v18
; SI-NEXT: v_add_i32_e32 v18, vcc, 8, v0
-; SI-NEXT: buffer_store_dword v16, v18, s[0:3], 0 offen
+; SI-NEXT: buffer_store_dword v17, v18, s[0:3], 0 offen
+; SI-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v35
+; SI-NEXT: v_or_b32_e32 v16, v16, v17
+; SI-NEXT: v_add_i32_e32 v17, vcc, 12, v0
+; SI-NEXT: buffer_store_dword v16, v17, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v15, 0xffff, v15
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v35
+; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v29
; SI-NEXT: v_or_b32_e32 v15, v15, v16
-; SI-NEXT: v_add_i32_e32 v16, vcc, 12, v0
+; SI-NEXT: v_add_i32_e32 v16, vcc, 16, v0
; SI-NEXT: buffer_store_dword v15, v16, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v14, 0xffff, v14
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v29
+; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v34
; SI-NEXT: v_or_b32_e32 v14, v14, v15
-; SI-NEXT: v_add_i32_e32 v15, vcc, 16, v0
+; SI-NEXT: v_add_i32_e32 v15, vcc, 20, v0
; SI-NEXT: buffer_store_dword v14, v15, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v13, 0xffff, v13
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v27
; SI-NEXT: v_or_b32_e32 v13, v13, v14
-; SI-NEXT: v_add_i32_e32 v14, vcc, 20, v0
+; SI-NEXT: v_add_i32_e32 v14, vcc, 24, v0
; SI-NEXT: buffer_store_dword v13, v14, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v12, 0xffff, v12
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v27
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v32
; SI-NEXT: v_or_b32_e32 v12, v12, v13
-; SI-NEXT: v_add_i32_e32 v13, vcc, 24, v0
+; SI-NEXT: v_add_i32_e32 v13, vcc, 28, v0
; SI-NEXT: buffer_store_dword v12, v13, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v11, 0xffff, v11
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v32
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v25
; SI-NEXT: v_or_b32_e32 v11, v11, v12
-; SI-NEXT: v_add_i32_e32 v12, vcc, 28, v0
+; SI-NEXT: v_add_i32_e32 v12, vcc, 32, v0
; SI-NEXT: buffer_store_dword v11, v12, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v10, 0xffff, v10
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v25
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v30
; SI-NEXT: v_or_b32_e32 v10, v10, v11
-; SI-NEXT: v_add_i32_e32 v11, vcc, 32, v0
+; SI-NEXT: v_add_i32_e32 v11, vcc, 36, v0
; SI-NEXT: buffer_store_dword v10, v11, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v9, 0xffff, v9
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v30
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v22
; SI-NEXT: v_or_b32_e32 v9, v9, v10
-; SI-NEXT: v_add_i32_e32 v10, vcc, 36, v0
+; SI-NEXT: v_add_i32_e32 v10, vcc, 40, v0
; SI-NEXT: buffer_store_dword v9, v10, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v8, 0xffff, v8
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v22
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v28
; SI-NEXT: v_or_b32_e32 v8, v8, v9
-; SI-NEXT: v_add_i32_e32 v9, vcc, 40, v0
+; SI-NEXT: v_add_i32_e32 v9, vcc, 44, v0
; SI-NEXT: buffer_store_dword v8, v9, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v7, 0xffff, v7
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v28
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v21
; SI-NEXT: v_or_b32_e32 v7, v7, v8
-; SI-NEXT: v_add_i32_e32 v8, vcc, 44, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 48, v0
; SI-NEXT: buffer_store_dword v7, v8, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v6, 0xffff, v6
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v21
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v26
; SI-NEXT: v_or_b32_e32 v6, v6, v7
-; SI-NEXT: v_add_i32_e32 v7, vcc, 48, v0
+; SI-NEXT: v_add_i32_e32 v7, vcc, 52, v0
; SI-NEXT: buffer_store_dword v6, v7, s[0:3], 0 offen
-; SI-NEXT: v_and_b32_e32 v5, 0xffff, v5
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v26
-; SI-NEXT: v_or_b32_e32 v5, v5, v6
-; SI-NEXT: v_add_i32_e32 v6, vcc, 52, v0
-; SI-NEXT: buffer_store_dword v5, v6, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v20
-; SI-NEXT: v_or_b32_e32 v1, v1, v5
-; SI-NEXT: v_add_i32_e32 v5, vcc, 56, v0
-; SI-NEXT: buffer_store_dword v1, v5, s[0:3], 0 offen
+; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v20
+; SI-NEXT: v_or_b32_e32 v1, v1, v6
+; SI-NEXT: v_add_i32_e32 v6, vcc, 56, v0
+; SI-NEXT: buffer_store_dword v1, v6, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v2
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v24
@@ -9916,7 +10027,7 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v1, 0xffff, v3
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v17
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v5
; SI-NEXT: v_or_b32_e32 v1, v1, v2
; SI-NEXT: v_add_i32_e32 v2, vcc, 64, v0
; SI-NEXT: buffer_store_dword v1, v2, s[0:3], 0 offen
@@ -9945,14 +10056,17 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr26
; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr24
-; SI-NEXT: ; implicit-def: $vgpr17
+; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v18f32_to_v36i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v17, s17
; VI-NEXT: v_mov_b32_e32 v16, s18
@@ -9966,8 +10080,8 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB29_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -10087,12 +10201,15 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr24
; VI-NEXT: ; implicit-def: $vgpr23
; VI-NEXT: ; implicit-def: $vgpr22
-; VI-NEXT: s_branch .LBB29_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB29_2
+; VI-NEXT: s_branch .LBB29_3
;
; GFX9-LABEL: bitcast_v18f32_to_v36i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s17
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -10106,8 +10223,8 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB29_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -10227,36 +10344,37 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr24
; GFX9-NEXT: ; implicit-def: $vgpr23
; GFX9-NEXT: ; implicit-def: $vgpr22
-; GFX9-NEXT: s_branch .LBB29_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB29_2
+; GFX9-NEXT: s_branch .LBB29_3
;
; GFX11-LABEL: bitcast_v18f32_to_v36i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s46, -1
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s6, s28, 16
-; GFX11-NEXT: s_lshr_b32 s7, s27, 16
-; GFX11-NEXT: s_lshr_b32 s8, s26, 16
-; GFX11-NEXT: s_lshr_b32 s9, s25, 16
-; GFX11-NEXT: s_lshr_b32 s10, s24, 16
-; GFX11-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-NEXT: s_lshr_b32 s12, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s14, s20, 16
-; GFX11-NEXT: s_lshr_b32 s15, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s17, 16
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX11-NEXT: s_lshr_b32 s4, s29, 16
+; GFX11-NEXT: s_lshr_b32 s5, s28, 16
+; GFX11-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-NEXT: s_lshr_b32 s7, s26, 16
+; GFX11-NEXT: s_lshr_b32 s8, s25, 16
+; GFX11-NEXT: s_lshr_b32 s9, s24, 16
+; GFX11-NEXT: s_lshr_b32 s10, s23, 16
+; GFX11-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-NEXT: s_lshr_b32 s12, s21, 16
+; GFX11-NEXT: s_lshr_b32 s13, s20, 16
+; GFX11-NEXT: s_lshr_b32 s14, s19, 16
+; GFX11-NEXT: s_lshr_b32 s15, s18, 16
+; GFX11-NEXT: s_lshr_b32 s40, s17, 16
+; GFX11-NEXT: s_lshr_b32 s41, s16, 16
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_cbranch_execnz .LBB29_4
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s28, 1.0
@@ -10296,7 +10414,6 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX11-NEXT: s_branch .LBB29_5
; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr43
@@ -10314,7 +10431,9 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
@@ -10325,15 +10444,15 @@ define inreg <36 x i16> @bitcast_v18f32_to_v36i16_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
; GFX11-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
-; GFX11-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
-; GFX11-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
-; GFX11-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-NEXT: v_dual_mov_b32 v35, s45 :: v_dual_mov_b32 v34, s44
+; GFX11-NEXT: v_dual_mov_b32 v33, s43 :: v_dual_mov_b32 v32, s42
+; GFX11-NEXT: v_dual_mov_b32 v31, s41 :: v_dual_mov_b32 v30, s40
+; GFX11-NEXT: v_dual_mov_b32 v29, s15 :: v_dual_mov_b32 v28, s14
+; GFX11-NEXT: v_dual_mov_b32 v27, s13 :: v_dual_mov_b32 v26, s12
+; GFX11-NEXT: v_dual_mov_b32 v25, s11 :: v_dual_mov_b32 v24, s10
+; GFX11-NEXT: v_dual_mov_b32 v23, s9 :: v_dual_mov_b32 v22, s8
+; GFX11-NEXT: v_dual_mov_b32 v21, s7 :: v_dual_mov_b32 v20, s6
+; GFX11-NEXT: v_dual_mov_b32 v19, s5 :: v_dual_mov_b32 v18, s4
; GFX11-NEXT: .LBB29_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -11121,6 +11240,7 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
@@ -11138,7 +11258,7 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v48, v4
; SI-NEXT: v_mov_b32_e32 v49, v2
; SI-NEXT: v_mov_b32_e32 v50, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v3
@@ -11155,46 +11275,46 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v7, v0, v45
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_or_b32_e32 v8, v0, v44
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v9, v0, v43
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v10, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v10, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v11, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v12, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v13, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v14, v0, v54
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v54
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v15, v0, v53
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v53
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v16, v0, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v44
; SI-NEXT: v_or_b32_e32 v17, v0, v51
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -11209,10 +11329,6 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v44, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v43, v0
@@ -11268,13 +11384,16 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v52, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -11283,6 +11402,7 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -11302,7 +11422,9 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v36i16_to_v18f32_scalar:
; VI: ; %bb.0:
@@ -11322,11 +11444,12 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB31_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -11501,15 +11624,13 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB31_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB31_2
+; VI-NEXT: s_branch .LBB31_3
;
; GFX9-LABEL: bitcast_v36i16_to_v18f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -11525,11 +11646,15 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -11544,6 +11669,7 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -11571,15 +11697,17 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v32
; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
-; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v36, 16, v17
; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v37, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v36, 16, v0
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -11594,20 +11722,20 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB31_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB31_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB31_2
+; GFX9-NEXT: s_branch .LBB31_3
;
; GFX11-LABEL: bitcast_v36i16_to_v18f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -11620,15 +11748,14 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -11641,14 +11768,18 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -11668,8 +11799,6 @@ define inreg <18 x float> @bitcast_v36i16_to_v18f32_scalar(<36 x i16> inreg %a,
; GFX11-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -12420,20 +12549,21 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; SI-NEXT: v_readfirstlane_b32 s8, v1
-; SI-NEXT: v_readfirstlane_b32 s7, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v3
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v4
+; SI-NEXT: v_readfirstlane_b32 s9, v1
+; SI-NEXT: v_readfirstlane_b32 s8, v2
+; SI-NEXT: v_readfirstlane_b32 s7, v3
+; SI-NEXT: v_readfirstlane_b32 s6, v4
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s29, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -12463,10 +12593,10 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v34, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v36, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s29
; SI-NEXT: v_cvt_f32_f16_e32 v13, s28
; SI-NEXT: v_cvt_f32_f16_e32 v14, s27
@@ -12497,10 +12627,10 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v14, s27, 1.0
; SI-NEXT: v_add_f32_e64 v13, s28, 1.0
; SI-NEXT: v_add_f32_e64 v11, s29, 1.0
-; SI-NEXT: v_add_f32_e64 v9, s8, 1.0
-; SI-NEXT: v_add_f32_e64 v7, s7, 1.0
-; SI-NEXT: v_add_f32_e64 v5, s6, 1.0
-; SI-NEXT: v_add_f32_e64 v3, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v9, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v7, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v5, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v3, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v32, 16, v4
@@ -12719,12 +12849,15 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v18f32_to_v36f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v17, s17
; VI-NEXT: v_mov_b32_e32 v16, s18
@@ -12738,8 +12871,8 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB33_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -12859,12 +12992,15 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr24
; VI-NEXT: ; implicit-def: $vgpr23
; VI-NEXT: ; implicit-def: $vgpr22
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
+; VI-NEXT: s_branch .LBB33_3
;
; GFX9-LABEL: bitcast_v18f32_to_v36f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v17, s17
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -12878,8 +13014,8 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB33_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -12999,36 +13135,37 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr24
; GFX9-NEXT: ; implicit-def: $vgpr23
; GFX9-NEXT: ; implicit-def: $vgpr22
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
+; GFX9-NEXT: s_branch .LBB33_3
;
; GFX11-LABEL: bitcast_v18f32_to_v36f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s46, -1
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s6, s28, 16
-; GFX11-NEXT: s_lshr_b32 s7, s27, 16
-; GFX11-NEXT: s_lshr_b32 s8, s26, 16
-; GFX11-NEXT: s_lshr_b32 s9, s25, 16
-; GFX11-NEXT: s_lshr_b32 s10, s24, 16
-; GFX11-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-NEXT: s_lshr_b32 s12, s22, 16
-; GFX11-NEXT: s_lshr_b32 s13, s21, 16
-; GFX11-NEXT: s_lshr_b32 s14, s20, 16
-; GFX11-NEXT: s_lshr_b32 s15, s19, 16
-; GFX11-NEXT: s_lshr_b32 s40, s18, 16
-; GFX11-NEXT: s_lshr_b32 s41, s17, 16
-; GFX11-NEXT: s_lshr_b32 s42, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX11-NEXT: s_lshr_b32 s4, s29, 16
+; GFX11-NEXT: s_lshr_b32 s5, s28, 16
+; GFX11-NEXT: s_lshr_b32 s6, s27, 16
+; GFX11-NEXT: s_lshr_b32 s7, s26, 16
+; GFX11-NEXT: s_lshr_b32 s8, s25, 16
+; GFX11-NEXT: s_lshr_b32 s9, s24, 16
+; GFX11-NEXT: s_lshr_b32 s10, s23, 16
+; GFX11-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-NEXT: s_lshr_b32 s12, s21, 16
+; GFX11-NEXT: s_lshr_b32 s13, s20, 16
+; GFX11-NEXT: s_lshr_b32 s14, s19, 16
+; GFX11-NEXT: s_lshr_b32 s15, s18, 16
+; GFX11-NEXT: s_lshr_b32 s40, s17, 16
+; GFX11-NEXT: s_lshr_b32 s41, s16, 16
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_cbranch_execnz .LBB33_4
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v13, s29, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s28, 1.0
@@ -13068,7 +13205,6 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX11-NEXT: s_branch .LBB33_5
; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr44
; GFX11-NEXT: ; implicit-def: $sgpr43
@@ -13086,7 +13222,9 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v2, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v0, s2 :: v_dual_mov_b32 v7, s3
@@ -13097,15 +13235,15 @@ define inreg <36 x half> @bitcast_v18f32_to_v36f16_scalar(<18 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s24 :: v_dual_mov_b32 v17, s25
; GFX11-NEXT: v_dual_mov_b32 v16, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v14, s28 :: v_dual_mov_b32 v13, s29
-; GFX11-NEXT: v_dual_mov_b32 v35, s46 :: v_dual_mov_b32 v34, s45
-; GFX11-NEXT: v_dual_mov_b32 v33, s44 :: v_dual_mov_b32 v32, s43
-; GFX11-NEXT: v_dual_mov_b32 v31, s42 :: v_dual_mov_b32 v30, s41
-; GFX11-NEXT: v_dual_mov_b32 v29, s40 :: v_dual_mov_b32 v28, s15
-; GFX11-NEXT: v_dual_mov_b32 v27, s14 :: v_dual_mov_b32 v26, s13
-; GFX11-NEXT: v_dual_mov_b32 v25, s12 :: v_dual_mov_b32 v24, s11
-; GFX11-NEXT: v_dual_mov_b32 v23, s10 :: v_dual_mov_b32 v22, s9
-; GFX11-NEXT: v_dual_mov_b32 v21, s8 :: v_dual_mov_b32 v20, s7
-; GFX11-NEXT: v_dual_mov_b32 v19, s6 :: v_dual_mov_b32 v18, s5
+; GFX11-NEXT: v_dual_mov_b32 v35, s45 :: v_dual_mov_b32 v34, s44
+; GFX11-NEXT: v_dual_mov_b32 v33, s43 :: v_dual_mov_b32 v32, s42
+; GFX11-NEXT: v_dual_mov_b32 v31, s41 :: v_dual_mov_b32 v30, s40
+; GFX11-NEXT: v_dual_mov_b32 v29, s15 :: v_dual_mov_b32 v28, s14
+; GFX11-NEXT: v_dual_mov_b32 v27, s13 :: v_dual_mov_b32 v26, s12
+; GFX11-NEXT: v_dual_mov_b32 v25, s11 :: v_dual_mov_b32 v24, s10
+; GFX11-NEXT: v_dual_mov_b32 v23, s9 :: v_dual_mov_b32 v22, s8
+; GFX11-NEXT: v_dual_mov_b32 v21, s7 :: v_dual_mov_b32 v20, s6
+; GFX11-NEXT: v_dual_mov_b32 v19, s5 :: v_dual_mov_b32 v18, s4
; GFX11-NEXT: .LBB33_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -14043,6 +14181,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v58, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -14303,7 +14442,9 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v44, v32
; SI-NEXT: v_mov_b32_e32 v25, v48
; SI-NEXT: v_mov_b32_e32 v48, v43
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v36f16_to_v18f32_scalar:
; VI: ; %bb.0:
@@ -14323,11 +14464,12 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB35_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -14471,15 +14613,13 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v36f16_to_v18f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -14495,11 +14635,15 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -14514,6 +14658,7 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB35_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -14572,14 +14717,16 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-LABEL: bitcast_v36f16_to_v18f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -14592,15 +14739,14 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -14613,14 +14759,18 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -14640,8 +14790,6 @@ define inreg <18 x float> @bitcast_v36f16_to_v18f32_scalar(<36 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -14828,8 +14976,9 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; SI-LABEL: bitcast_v9i64_to_v9f64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -14847,12 +14996,15 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -14871,16 +15023,15 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
; SI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v9i64_to_v9f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -14898,12 +15049,15 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -14922,16 +15076,15 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_addc_u32_e32 v15, vcc, 0, v15, vcc
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
; VI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v9i64_to_v9f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -14949,12 +15102,15 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -14973,22 +15129,23 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_addc_co_u32_e32 v15, vcc, 0, v15, vcc
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v9i64_to_v9f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
; GFX11-NEXT: s_add_u32 s2, s2, 3
@@ -15007,7 +15164,7 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: s_addc_u32 s27, s27, 0
; GFX11-NEXT: s_add_u32 s28, s28, 3
; GFX11-NEXT: s_addc_u32 s29, s29, 0
-; GFX11-NEXT: .LBB37_3: ; %end
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -15018,8 +15175,6 @@ define inreg <9 x double> @bitcast_v9i64_to_v9f64_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_4:
-; GFX11-NEXT: s_branch .LBB37_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15147,8 +15302,9 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; SI-LABEL: bitcast_v9f64_to_v9i64_scalar:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: v_mov_b32_e32 v13, v4
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: v_mov_b32_e32 v18, v4
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, v3
; SI-NEXT: v_mov_b32_e32 v16, v2
; SI-NEXT: v_mov_b32_e32 v15, v1
@@ -15166,12 +15322,15 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -15181,16 +15340,15 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; SI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v9f64_to_v9i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; VI-NEXT: v_mov_b32_e32 v13, v4
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: v_mov_b32_e32 v18, v4
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, v3
; VI-NEXT: v_mov_b32_e32 v16, v2
; VI-NEXT: v_mov_b32_e32 v15, v1
@@ -15208,12 +15366,15 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -15223,16 +15384,15 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; VI-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v9f64_to_v9i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v13, v4
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: v_mov_b32_e32 v18, v4
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v18
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, v3
; GFX9-NEXT: v_mov_b32_e32 v16, v2
; GFX9-NEXT: v_mov_b32_e32 v15, v1
@@ -15250,12 +15410,15 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -15265,10 +15428,8 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX9-NEXT: v_add_f64 v[12:13], v[12:13], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v9f64_to_v9i64_scalar:
; GFX11: ; %bb.0:
@@ -15278,23 +15439,23 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 exec_lo, s4
; GFX11-NEXT: v_writelane_b32 v32, s36, 0
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
+; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: s_mov_b32 s47, s23
; GFX11-NEXT: s_mov_b32 s46, s22
-; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: v_writelane_b32 v32, s37, 1
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s45, s21
; GFX11-NEXT: s_mov_b32 s44, s20
; GFX11-NEXT: s_mov_b32 s43, s19
+; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s42, s18
; GFX11-NEXT: s_mov_b32 s41, s17
-; GFX11-NEXT: v_writelane_b32 v32, s38, 2
; GFX11-NEXT: s_mov_b32 s40, s16
; GFX11-NEXT: s_mov_b32 s38, s2
-; GFX11-NEXT: s_mov_b32 s37, s1
-; GFX11-NEXT: s_mov_b32 s36, s0
; GFX11-NEXT: v_writelane_b32 v32, s39, 3
; GFX11-NEXT: s_mov_b32 s39, s3
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s37, s1
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: v_writelane_b32 v32, s48, 4
; GFX11-NEXT: s_mov_b32 s48, s24
; GFX11-NEXT: v_writelane_b32 v32, s49, 5
@@ -15307,11 +15468,14 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX11-NEXT: s_mov_b32 s52, s28
; GFX11-NEXT: v_writelane_b32 v32, s53, 9
; GFX11-NEXT: s_mov_b32 s53, s29
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s0, 0
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[36:37], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], s[38:39], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], s[40:41], 1.0
@@ -15322,8 +15486,6 @@ define inreg <9 x i64> @bitcast_v9f64_to_v9i64_scalar(<9 x double> inreg %a, i32
; GFX11-NEXT: v_add_f64 v[14:15], s[50:51], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], s[52:53], 1.0
; GFX11-NEXT: s_branch .LBB39_5
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s36 :: v_dual_mov_b32 v1, s37
; GFX11-NEXT: v_dual_mov_b32 v2, s38 :: v_dual_mov_b32 v3, s39
@@ -15972,11 +16134,12 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s9, v1
; SI-NEXT: v_readfirstlane_b32 s8, v2
; SI-NEXT: v_readfirstlane_b32 s7, v3
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v4
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -16182,21 +16345,24 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v9i64_to_v36i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s9, v0
; VI-NEXT: v_readfirstlane_b32 s8, v1
-; VI-NEXT: v_readfirstlane_b32 s6, v2
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v3
+; VI-NEXT: v_readfirstlane_b32 s7, v2
+; VI-NEXT: v_readfirstlane_b32 s6, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -16215,8 +16381,8 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: s_lshr_b32 s59, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s28, s28, 3
@@ -16233,8 +16399,8 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -16297,15 +16463,15 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s12, s12, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s11, s11, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_or_b32 s14, s26, s14
; VI-NEXT: s_or_b32 s9, s9, s13
; VI-NEXT: s_or_b32 s8, s8, s12
-; VI-NEXT: s_or_b32 s6, s6, s11
-; VI-NEXT: s_or_b32 s7, s7, s10
+; VI-NEXT: s_or_b32 s7, s7, s11
+; VI-NEXT: s_or_b32 s6, s6, s10
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -16322,8 +16488,8 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v13, s14
; VI-NEXT: v_mov_b32_e32 v14, s9
; VI-NEXT: v_mov_b32_e32 v15, s8
-; VI-NEXT: v_mov_b32_e32 v16, s6
-; VI-NEXT: v_mov_b32_e32 v17, s7
+; VI-NEXT: v_mov_b32_e32 v16, s7
+; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_4:
; VI-NEXT: ; implicit-def: $sgpr59
@@ -16344,23 +16510,26 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr12
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v9i64_to_v36i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s6, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -16377,10 +16546,10 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -16395,10 +16564,10 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -16428,10 +16597,10 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s40
; GFX9-NEXT: s_pack_ll_b32_b16 s15, s28, s15
; GFX9-NEXT: s_pack_ll_b32_b16 s14, s29, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s12
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s11
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -16446,10 +16615,10 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s15
; GFX9-NEXT: v_mov_b32_e32 v13, s14
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: ; implicit-def: $sgpr59
@@ -16470,13 +16639,15 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr12
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v9i64_to_v36i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s46, 0
+; GFX11-NEXT: s_mov_b32 s46, -1
; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -16498,8 +16669,7 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s43, s2, 16
; GFX11-NEXT: s_lshr_b32 s44, s1, 16
; GFX11-NEXT: s_lshr_b32 s45, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s28, s28, 3
; GFX11-NEXT: s_addc_u32 s29, s29, 0
@@ -16586,7 +16756,9 @@ define inreg <36 x i16> @bitcast_v9i64_to_v36i16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17336,6 +17508,7 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
@@ -17353,7 +17526,7 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v48, v4
; SI-NEXT: v_mov_b32_e32 v49, v2
; SI-NEXT: v_mov_b32_e32 v50, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v3
@@ -17370,46 +17543,46 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v7, v0, v45
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_or_b32_e32 v8, v0, v44
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v9, v0, v43
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v10, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v10, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v11, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v12, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v13, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v14, v0, v54
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v54
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v15, v0, v53
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v53
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v16, v0, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v44
; SI-NEXT: v_or_b32_e32 v17, v0, v51
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -17424,10 +17597,6 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v44, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v43, v0
@@ -17483,13 +17652,16 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v52, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -17498,6 +17670,7 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -17517,7 +17690,9 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v36i16_to_v9i64_scalar:
; VI: ; %bb.0:
@@ -17537,11 +17712,12 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -17716,15 +17892,13 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v36i16_to_v9i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -17740,11 +17914,15 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -17759,6 +17937,7 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -17786,15 +17965,17 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v32
; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
-; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v36, 16, v17
; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v37, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v36, 16, v0
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -17809,20 +17990,20 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB43_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-LABEL: bitcast_v36i16_to_v9i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -17835,15 +18016,14 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -17856,14 +18036,18 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -17883,8 +18067,6 @@ define inreg <9 x i64> @bitcast_v36i16_to_v9i64_scalar(<36 x i16> inreg %a, i32
; GFX11-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -18663,20 +18845,21 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
-; SI-NEXT: v_readfirstlane_b32 s7, v1
-; SI-NEXT: v_readfirstlane_b32 s8, v2
-; SI-NEXT: v_readfirstlane_b32 s6, v3
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v4
+; SI-NEXT: v_readfirstlane_b32 s8, v1
+; SI-NEXT: v_readfirstlane_b32 s9, v2
+; SI-NEXT: v_readfirstlane_b32 s6, v3
+; SI-NEXT: v_readfirstlane_b32 s7, v4
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s29, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -18706,10 +18889,10 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: v_cvt_f32_f16_e32 v34, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v36, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s7
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s29
; SI-NEXT: v_cvt_f32_f16_e32 v13, s28
; SI-NEXT: v_cvt_f32_f16_e32 v14, s27
@@ -18754,18 +18937,18 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: s_addc_u32 s29, s29, 0
; SI-NEXT: s_lshr_b32 s44, s28, 16
; SI-NEXT: s_lshr_b32 s45, s29, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s46, s7, 16
-; SI-NEXT: s_lshr_b32 s47, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: s_lshr_b32 s46, s8, 16
+; SI-NEXT: s_lshr_b32 s47, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: s_lshr_b32 s56, s6, 16
-; SI-NEXT: s_lshr_b32 s57, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
+; SI-NEXT: s_lshr_b32 s57, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s7
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s29
; SI-NEXT: v_cvt_f32_f16_e32 v13, s28
; SI-NEXT: v_cvt_f32_f16_e32 v14, s27
@@ -18962,21 +19145,24 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v9i64_to_v36f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s9, v0
; VI-NEXT: v_readfirstlane_b32 s8, v1
-; VI-NEXT: v_readfirstlane_b32 s6, v2
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v3
+; VI-NEXT: v_readfirstlane_b32 s7, v2
+; VI-NEXT: v_readfirstlane_b32 s6, v3
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -18995,8 +19181,8 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: s_lshr_b32 s59, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s28, s28, 3
@@ -19013,8 +19199,8 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s10, s7, 16
-; VI-NEXT: s_lshr_b32 s11, s6, 16
+; VI-NEXT: s_lshr_b32 s10, s6, 16
+; VI-NEXT: s_lshr_b32 s11, s7, 16
; VI-NEXT: s_lshr_b32 s12, s8, 16
; VI-NEXT: s_lshr_b32 s13, s9, 16
; VI-NEXT: s_lshr_b32 s14, s29, 16
@@ -19077,15 +19263,15 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s12, s12, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s11, s11, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_or_b32 s14, s26, s14
; VI-NEXT: s_or_b32 s9, s9, s13
; VI-NEXT: s_or_b32 s8, s8, s12
-; VI-NEXT: s_or_b32 s6, s6, s11
-; VI-NEXT: s_or_b32 s7, s7, s10
+; VI-NEXT: s_or_b32 s7, s7, s11
+; VI-NEXT: s_or_b32 s6, s6, s10
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -19102,8 +19288,8 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: v_mov_b32_e32 v13, s14
; VI-NEXT: v_mov_b32_e32 v14, s9
; VI-NEXT: v_mov_b32_e32 v15, s8
-; VI-NEXT: v_mov_b32_e32 v16, s6
-; VI-NEXT: v_mov_b32_e32 v17, s7
+; VI-NEXT: v_mov_b32_e32 v16, s7
+; VI-NEXT: v_mov_b32_e32 v17, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_4:
; VI-NEXT: ; implicit-def: $sgpr59
@@ -19124,23 +19310,26 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr12
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v9i64_to_v36f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s6, v3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -19157,10 +19346,10 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s59, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -19175,10 +19364,10 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s10, s9, 16
-; GFX9-NEXT: s_lshr_b32 s11, s8, 16
-; GFX9-NEXT: s_lshr_b32 s12, s7, 16
-; GFX9-NEXT: s_lshr_b32 s13, s6, 16
+; GFX9-NEXT: s_lshr_b32 s10, s6, 16
+; GFX9-NEXT: s_lshr_b32 s11, s9, 16
+; GFX9-NEXT: s_lshr_b32 s12, s8, 16
+; GFX9-NEXT: s_lshr_b32 s13, s7, 16
; GFX9-NEXT: s_lshr_b32 s14, s29, 16
; GFX9-NEXT: s_lshr_b32 s15, s28, 16
; GFX9-NEXT: s_lshr_b32 s40, s27, 16
@@ -19208,10 +19397,10 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s40
; GFX9-NEXT: s_pack_ll_b32_b16 s15, s28, s15
; GFX9-NEXT: s_pack_ll_b32_b16 s14, s29, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s12
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s11
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s10
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s11
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -19226,10 +19415,10 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s15
; GFX9-NEXT: v_mov_b32_e32 v13, s14
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: ; implicit-def: $sgpr59
@@ -19250,13 +19439,15 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr12
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v9i64_to_v36f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s46, 0
+; GFX11-NEXT: s_mov_b32 s46, -1
; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -19278,8 +19469,7 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s43, s2, 16
; GFX11-NEXT: s_lshr_b32 s44, s1, 16
; GFX11-NEXT: s_lshr_b32 s45, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s28, s28, 3
; GFX11-NEXT: s_addc_u32 s29, s29, 0
@@ -19366,7 +19556,9 @@ define inreg <36 x half> @bitcast_v9i64_to_v36f16_scalar(<9 x i64> inreg %a, i32
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20266,6 +20458,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v58, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -20526,7 +20719,9 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; SI-NEXT: v_mov_b32_e32 v44, v32
; SI-NEXT: v_mov_b32_e32 v25, v48
; SI-NEXT: v_mov_b32_e32 v48, v43
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v36f16_to_v9i64_scalar:
; VI: ; %bb.0:
@@ -20546,11 +20741,12 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -20694,15 +20890,13 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v36f16_to_v9i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -20718,11 +20912,15 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -20737,6 +20935,7 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB47_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -20795,14 +20994,16 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
+; GFX9-NEXT: s_branch .LBB47_3
;
; GFX11-LABEL: bitcast_v36f16_to_v9i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -20815,15 +21016,14 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -20836,14 +21036,18 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -20863,8 +21067,6 @@ define inreg <9 x i64> @bitcast_v36f16_to_v9i64_scalar(<36 x half> inreg %a, i32
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -21443,6 +21645,7 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v18, s16
; SI-NEXT: v_mov_b32_e32 v19, s17
; SI-NEXT: v_mov_b32_e32 v16, s18
@@ -21456,8 +21659,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, s26
; SI-NEXT: v_mov_b32_e32 v9, s27
; SI-NEXT: v_mov_b32_e32 v6, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v7, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_alignbit_b32 v5, v4, v3, 16
@@ -21635,12 +21838,15 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr23
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v9f64_to_v36i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v19, s17
; VI-NEXT: v_mov_b32_e32 v16, s18
@@ -21654,8 +21860,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -21766,12 +21972,15 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; VI-NEXT: ; implicit-def: $vgpr24
; VI-NEXT: ; implicit-def: $vgpr23
; VI-NEXT: ; implicit-def: $vgpr22
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v9f64_to_v36i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -21785,8 +21994,8 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -21897,36 +22106,37 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX9-NEXT: ; implicit-def: $vgpr24
; GFX9-NEXT: ; implicit-def: $vgpr23
; GFX9-NEXT: ; implicit-def: $vgpr22
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
+; GFX9-NEXT: s_branch .LBB49_3
;
; GFX11-LABEL: bitcast_v9f64_to_v36i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s46, -1
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s14, s28, 16
-; GFX11-NEXT: s_lshr_b32 s6, s27, 16
-; GFX11-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-NEXT: s_lshr_b32 s7, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s8, s23, 16
-; GFX11-NEXT: s_lshr_b32 s41, s22, 16
-; GFX11-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s11, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s16, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 16
-; GFX11-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_lshr_b32 s4, s29, 16
+; GFX11-NEXT: s_lshr_b32 s13, s28, 16
+; GFX11-NEXT: s_lshr_b32 s5, s27, 16
+; GFX11-NEXT: s_lshr_b32 s14, s26, 16
+; GFX11-NEXT: s_lshr_b32 s6, s25, 16
+; GFX11-NEXT: s_lshr_b32 s15, s24, 16
+; GFX11-NEXT: s_lshr_b32 s7, s23, 16
+; GFX11-NEXT: s_lshr_b32 s40, s22, 16
+; GFX11-NEXT: s_lshr_b32 s8, s21, 16
+; GFX11-NEXT: s_lshr_b32 s41, s20, 16
+; GFX11-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-NEXT: s_lshr_b32 s10, s17, 16
+; GFX11-NEXT: s_lshr_b32 s43, s16, 16
+; GFX11-NEXT: s_lshr_b32 s11, s3, 16
+; GFX11-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-NEXT: s_lshr_b32 s12, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_cbranch_execnz .LBB49_4
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
; GFX11-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
@@ -21957,8 +22167,6 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v0
; GFX11-NEXT: s_branch .LBB49_5
; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr44
@@ -21975,7 +22183,10 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: ; implicit-def: $sgpr13
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
@@ -21986,15 +22197,15 @@ define inreg <36 x i16> @bitcast_v9f64_to_v36i16_scalar(<9 x double> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
-; GFX11-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
-; GFX11-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
-; GFX11-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
-; GFX11-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
-; GFX11-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
-; GFX11-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
+; GFX11-NEXT: v_dual_mov_b32 v34, s45 :: v_dual_mov_b32 v7, s42
+; GFX11-NEXT: v_dual_mov_b32 v2, s44 :: v_dual_mov_b32 v27, s41
+; GFX11-NEXT: v_dual_mov_b32 v30, s43 :: v_dual_mov_b32 v21, s13
+; GFX11-NEXT: v_dual_mov_b32 v26, s40 :: v_dual_mov_b32 v35, s12
+; GFX11-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v33, s11
+; GFX11-NEXT: v_dual_mov_b32 v22, s14 :: v_dual_mov_b32 v31, s9
+; GFX11-NEXT: v_dual_mov_b32 v32, s10 :: v_dual_mov_b32 v29, s8
+; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v25, s6
+; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: .LBB49_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -22782,6 +22993,7 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
@@ -22799,7 +23011,7 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v48, v4
; SI-NEXT: v_mov_b32_e32 v49, v2
; SI-NEXT: v_mov_b32_e32 v50, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v3
@@ -22816,46 +23028,46 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v7, v0, v45
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
-; SI-NEXT: v_or_b32_e32 v8, v0, v44
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v9, v0, v43
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v10, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v10, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v11, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v11, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v12, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v13, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v14, v0, v54
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v54
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v15, v0, v53
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v53
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v16, v0, v52
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v44
; SI-NEXT: v_or_b32_e32 v17, v0, v51
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -22870,10 +23082,6 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v45, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v49
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v44, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v48
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v43, v0
@@ -22929,13 +23137,16 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v52, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v49
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v16, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v44, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -22944,6 +23155,7 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v51, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v17, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -22963,7 +23175,9 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v36i16_to_v9f64_scalar:
; VI: ; %bb.0:
@@ -22983,11 +23197,12 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -23162,15 +23377,13 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v36i16_to_v9f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -23186,11 +23399,15 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -23205,6 +23422,7 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -23232,15 +23450,17 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v32
; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
-; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v37, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v36, 16, v17
; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v37, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v38, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v36, 16, v0
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -23255,20 +23475,20 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-LABEL: bitcast_v36i16_to_v9f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -23281,15 +23501,14 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -23302,14 +23521,18 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB51_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
@@ -23329,8 +23552,6 @@ define inreg <9 x double> @bitcast_v36i16_to_v9f64_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -24045,11 +24266,12 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v5
+; SI-NEXT: s_and_b64 s[8:9], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v1
; SI-NEXT: v_readfirstlane_b32 s7, v2
; SI-NEXT: v_readfirstlane_b32 s4, v3
-; SI-NEXT: s_and_b64 s[8:9], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v4
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB53_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s8, s5, 16
@@ -24335,12 +24557,15 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v9f64_to_v36f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v18, s16
; VI-NEXT: v_mov_b32_e32 v19, s17
; VI-NEXT: v_mov_b32_e32 v16, s18
@@ -24354,8 +24579,8 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB53_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -24466,12 +24691,15 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr24
; VI-NEXT: ; implicit-def: $vgpr23
; VI-NEXT: ; implicit-def: $vgpr22
-; VI-NEXT: s_branch .LBB53_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB53_2
+; VI-NEXT: s_branch .LBB53_3
;
; GFX9-LABEL: bitcast_v9f64_to_v36f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v18, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v16, s18
@@ -24485,8 +24713,8 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v3
@@ -24597,36 +24825,37 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr24
; GFX9-NEXT: ; implicit-def: $vgpr23
; GFX9-NEXT: ; implicit-def: $vgpr22
-; GFX9-NEXT: s_branch .LBB53_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB53_2
+; GFX9-NEXT: s_branch .LBB53_3
;
; GFX11-LABEL: bitcast_v9f64_to_v36f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_and_b32 s5, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s46, -1
+; GFX11-NEXT: s_and_b32 s4, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s5, s29, 16
-; GFX11-NEXT: s_lshr_b32 s14, s28, 16
-; GFX11-NEXT: s_lshr_b32 s6, s27, 16
-; GFX11-NEXT: s_lshr_b32 s15, s26, 16
-; GFX11-NEXT: s_lshr_b32 s7, s25, 16
-; GFX11-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-NEXT: s_lshr_b32 s8, s23, 16
-; GFX11-NEXT: s_lshr_b32 s41, s22, 16
-; GFX11-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-NEXT: s_lshr_b32 s42, s20, 16
-; GFX11-NEXT: s_lshr_b32 s10, s19, 16
-; GFX11-NEXT: s_lshr_b32 s43, s18, 16
-; GFX11-NEXT: s_lshr_b32 s11, s17, 16
-; GFX11-NEXT: s_lshr_b32 s44, s16, 16
-; GFX11-NEXT: s_lshr_b32 s12, s3, 16
-; GFX11-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: s_lshr_b32 s4, s29, 16
+; GFX11-NEXT: s_lshr_b32 s13, s28, 16
+; GFX11-NEXT: s_lshr_b32 s5, s27, 16
+; GFX11-NEXT: s_lshr_b32 s14, s26, 16
+; GFX11-NEXT: s_lshr_b32 s6, s25, 16
+; GFX11-NEXT: s_lshr_b32 s15, s24, 16
+; GFX11-NEXT: s_lshr_b32 s7, s23, 16
+; GFX11-NEXT: s_lshr_b32 s40, s22, 16
+; GFX11-NEXT: s_lshr_b32 s8, s21, 16
+; GFX11-NEXT: s_lshr_b32 s41, s20, 16
+; GFX11-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-NEXT: s_lshr_b32 s42, s18, 16
+; GFX11-NEXT: s_lshr_b32 s10, s17, 16
+; GFX11-NEXT: s_lshr_b32 s43, s16, 16
+; GFX11-NEXT: s_lshr_b32 s11, s3, 16
+; GFX11-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-NEXT: s_lshr_b32 s12, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_cbranch_execnz .LBB53_4
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[13:14], s[28:29], 1.0
; GFX11-NEXT: v_add_f64 v[15:16], s[26:27], 1.0
@@ -24657,8 +24886,6 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v0
; GFX11-NEXT: s_branch .LBB53_5
; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: ; implicit-def: $sgpr46
-; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr45
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr44
@@ -24675,7 +24902,10 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-NEXT: ; implicit-def: $sgpr13
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
+; GFX11-NEXT: s_cbranch_vccz .LBB53_2
; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v19, s2
; GFX11-NEXT: v_dual_mov_b32 v5, s16 :: v_dual_mov_b32 v10, s20
@@ -24686,15 +24916,15 @@ define inreg <36 x half> @bitcast_v9f64_to_v36f16_scalar(<9 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v18, s25
; GFX11-NEXT: v_dual_mov_b32 v11, s21 :: v_dual_mov_b32 v16, s27
; GFX11-NEXT: v_dual_mov_b32 v9, s23 :: v_dual_mov_b32 v14, s29
-; GFX11-NEXT: v_dual_mov_b32 v34, s46 :: v_dual_mov_b32 v7, s43
-; GFX11-NEXT: v_dual_mov_b32 v2, s45 :: v_dual_mov_b32 v27, s42
-; GFX11-NEXT: v_dual_mov_b32 v30, s44 :: v_dual_mov_b32 v21, s14
-; GFX11-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v35, s13
-; GFX11-NEXT: v_dual_mov_b32 v12, s40 :: v_dual_mov_b32 v33, s12
-; GFX11-NEXT: v_dual_mov_b32 v22, s15 :: v_dual_mov_b32 v31, s10
-; GFX11-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v29, s9
-; GFX11-NEXT: v_dual_mov_b32 v28, s8 :: v_dual_mov_b32 v25, s7
-; GFX11-NEXT: v_dual_mov_b32 v24, s6 :: v_dual_mov_b32 v23, s5
+; GFX11-NEXT: v_dual_mov_b32 v34, s45 :: v_dual_mov_b32 v7, s42
+; GFX11-NEXT: v_dual_mov_b32 v2, s44 :: v_dual_mov_b32 v27, s41
+; GFX11-NEXT: v_dual_mov_b32 v30, s43 :: v_dual_mov_b32 v21, s13
+; GFX11-NEXT: v_dual_mov_b32 v26, s40 :: v_dual_mov_b32 v35, s12
+; GFX11-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v33, s11
+; GFX11-NEXT: v_dual_mov_b32 v22, s14 :: v_dual_mov_b32 v31, s9
+; GFX11-NEXT: v_dual_mov_b32 v32, s10 :: v_dual_mov_b32 v29, s8
+; GFX11-NEXT: v_dual_mov_b32 v28, s7 :: v_dual_mov_b32 v25, s6
+; GFX11-NEXT: v_dual_mov_b32 v24, s5 :: v_dual_mov_b32 v23, s4
; GFX11-NEXT: .LBB53_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -25632,6 +25862,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v58, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v22
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
@@ -25892,7 +26123,9 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v44, v32
; SI-NEXT: v_mov_b32_e32 v25, v48
; SI-NEXT: v_mov_b32_e32 v48, v43
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v36f16_to_v9f64_scalar:
; VI: ; %bb.0:
@@ -25912,11 +26145,12 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v3
; VI-NEXT: v_mov_b32_e32 v33, v2
; VI-NEXT: v_mov_b32_e32 v34, v1
; VI-NEXT: v_mov_b32_e32 v35, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB55_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -26060,15 +26294,13 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB55_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB55_2
+; VI-NEXT: s_branch .LBB55_3
;
; GFX9-LABEL: bitcast_v36f16_to_v9f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v3
-; GFX9-NEXT: v_mov_b32_e32 v33, v2
-; GFX9-NEXT: v_mov_b32_e32 v34, v1
-; GFX9-NEXT: v_mov_b32_e32 v35, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -26084,11 +26316,15 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: v_mov_b32_e32 v32, v3
+; GFX9-NEXT: v_mov_b32_e32 v33, v2
+; GFX9-NEXT: v_mov_b32_e32 v34, v1
+; GFX9-NEXT: v_mov_b32_e32 v35, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -26103,6 +26339,7 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
@@ -26161,14 +26398,16 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB55_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB55_2
+; GFX9-NEXT: s_branch .LBB55_3
;
; GFX11-LABEL: bitcast_v36f16_to_v9f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX11-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-NEXT: s_lshr_b32 s42, s28, 16
+; GFX11-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-NEXT: s_lshr_b32 s13, s25, 16
@@ -26181,15 +26420,14 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX11-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-NEXT: s_mov_b32 s40, 0
-; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -26202,14 +26440,18 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s14, s26, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-NEXT: s_mov_b32 s40, -1
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
+; GFX11-NEXT: s_mov_b32 s40, 0
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
@@ -26229,8 +26471,6 @@ define inreg <9 x double> @bitcast_v36f16_to_v9f64_scalar(<36 x half> inreg %a,
; GFX11-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -27060,6 +27300,7 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v23
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
@@ -27364,7 +27605,9 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr26
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v36i16_to_v36f16_scalar:
; VI: ; %bb.0:
@@ -27384,15 +27627,19 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v5, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_lshrrev_b32_e32 v4, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v0
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_add_i32 s43, s43, 3
; VI-NEXT: s_add_i32 s17, s17, 3
@@ -27422,14 +27669,14 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s6, s6, 3
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_add_u32_e32 v4, vcc, 3, v4
+; VI-NEXT: v_add_u32_e32 v8, vcc, 3, v8
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v7, vcc, 3, v7
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v6, vcc, 3, v6
; VI-NEXT: v_add_u32_e32 v3, vcc, 3, v3
; VI-NEXT: v_add_u32_e32 v5, vcc, 3, v5
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -27465,7 +27712,7 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; VI-NEXT: s_or_b32 s9, s18, s9
; VI-NEXT: s_and_b32 s18, 0xffff, s27
; VI-NEXT: s_lshl_b32 s8, s8, 16
-; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; VI-NEXT: v_lshlrev_b32_e32 v4, 16, v8
; VI-NEXT: s_or_b32 s8, s18, s8
; VI-NEXT: s_and_b32 s18, 0xffff, s28
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -27495,8 +27742,6 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s7
; VI-NEXT: v_mov_b32_e32 v13, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v36i16_to_v36f16_scalar:
; GFX9: ; %bb.0:
@@ -27516,15 +27761,19 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s29, s43
; GFX9-NEXT: v_pk_add_u16 v13, s4, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s28, s42
@@ -27584,8 +27833,6 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: s_branch .LBB57_5
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -27674,19 +27921,22 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s46, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -27699,10 +27949,10 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-NEXT: v_pk_add_u16 v13, s29, 3 op_sel_hi:[1,0]
@@ -27717,12 +27967,12 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v3, s11, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v4, s10, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v2, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v7, s3, 3 op_sel_hi:[1,0]
-; GFX11-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
@@ -27742,8 +27992,6 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
; GFX11-NEXT: s_branch .LBB57_5
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
@@ -27760,8 +28008,8 @@ define inreg <36 x half> @bitcast_v36i16_to_v36f16_scalar(<36 x i16> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
; GFX11-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
-; GFX11-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
-; GFX11-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-NEXT: v_dual_mov_b32 v30, s8 :: v_dual_mov_b32 v31, s6
+; GFX11-NEXT: v_dual_mov_b32 v32, s9 :: v_dual_mov_b32 v33, s7
; GFX11-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
; GFX11-NEXT: .LBB57_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -28558,10 +28806,14 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v15, s28
; SI-NEXT: v_cvt_f16_f32_e32 v33, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
@@ -28715,7 +28967,7 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v30, v6, v30, 16
; SI-NEXT: v_alignbit_b32 v12, v3, v12, 16
; SI-NEXT: v_alignbit_b32 v9, v1, v9, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
; SI-NEXT: v_lshlrev_b32_e32 v36, 16, v36
; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19
@@ -28818,8 +29070,6 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v36f16_to_v36i16_scalar:
; VI: ; %bb.0:
@@ -28839,15 +29089,19 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v22, 0x200
; VI-NEXT: v_add_f16_e32 v20, s16, v22
; VI-NEXT: v_add_f16_e32 v35, s43, v22
@@ -28886,8 +29140,6 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v3, 0x200, v3
; VI-NEXT: v_add_f16_e32 v17, 0x200, v17
; VI-NEXT: s_branch .LBB59_5
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v22, s6
; VI-NEXT: v_mov_b32_e32 v13, s29
@@ -28978,15 +29230,19 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v4
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX9-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -29048,8 +29304,6 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: s_branch .LBB59_5
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -29138,19 +29392,22 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s46, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s46, 0
-; GFX11-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_ll_b32_b16 s29, s29, s45
; GFX11-NEXT: s_pack_ll_b32_b16 s28, s28, s44
; GFX11-NEXT: s_pack_ll_b32_b16 s27, s27, s43
@@ -29163,10 +29420,10 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-NEXT: v_pk_add_f16 v13, 0x200, s29 op_sel_hi:[0,1]
@@ -29181,12 +29438,12 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v3, 0x200, s11 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v4, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v7, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v0
@@ -29206,8 +29463,6 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: v_lshrrev_b32_e32 v19, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v18, 16, v13
; GFX11-NEXT: s_branch .LBB59_5
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
@@ -29224,8 +29479,8 @@ define inreg <36 x i16> @bitcast_v36f16_to_v36i16_scalar(<36 x half> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v24, s15 :: v_dual_mov_b32 v25, s14
; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
; GFX11-NEXT: v_dual_mov_b32 v28, s11 :: v_dual_mov_b32 v29, s10
-; GFX11-NEXT: v_dual_mov_b32 v30, s9 :: v_dual_mov_b32 v31, s7
-; GFX11-NEXT: v_dual_mov_b32 v32, s6 :: v_dual_mov_b32 v33, s8
+; GFX11-NEXT: v_dual_mov_b32 v30, s8 :: v_dual_mov_b32 v31, s6
+; GFX11-NEXT: v_dual_mov_b32 v32, s9 :: v_dual_mov_b32 v33, s7
; GFX11-NEXT: v_dual_mov_b32 v34, s4 :: v_dual_mov_b32 v35, s5
; GFX11-NEXT: .LBB59_5: ; %end
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
index b8091d8..5670820 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.640bit.ll
@@ -162,6 +162,7 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -181,12 +182,15 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17
@@ -207,16 +211,15 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v20i32_to_v20f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -236,12 +239,15 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v19, vcc, 3, v19
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
@@ -262,16 +268,15 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v20i32_to_v20f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -291,12 +296,15 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
; GFX9-NEXT: v_add_u32_e32 v18, 3, v18
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
@@ -317,37 +325,35 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v20i32_to_v20f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
@@ -368,6 +374,7 @@ define inreg <20 x float> @bitcast_v20i32_to_v20f32_scalar(<20 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -532,6 +539,7 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -551,12 +559,15 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v19, 1.0, v19
; SI-NEXT: v_add_f32_e32 v18, 1.0, v18
; SI-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -577,16 +588,15 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v20f32_to_v20i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -606,12 +616,15 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -632,16 +645,15 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v20f32_to_v20i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -661,12 +673,15 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -687,37 +702,35 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v20f32_to_v20i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB3_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: .LBB3_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
@@ -728,6 +741,7 @@ define inreg <20 x i32> @bitcast_v20f32_to_v20i32_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -902,6 +916,7 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -921,12 +936,15 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17
@@ -947,16 +965,15 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v20i32_to_v10i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -976,12 +993,15 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v19, vcc, 3, v19
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
@@ -1002,16 +1022,15 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v20i32_to_v10i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -1031,12 +1050,15 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
; GFX9-NEXT: v_add_u32_e32 v18, 3, v18
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
@@ -1057,37 +1079,35 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v20i32_to_v10i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
@@ -1108,6 +1128,7 @@ define inreg <10 x i64> @bitcast_v20i32_to_v10i64_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1287,6 +1308,7 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -1306,12 +1328,15 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
; SI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
@@ -1332,16 +1357,15 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v10i64_to_v20i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -1361,12 +1385,15 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
; VI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
@@ -1387,16 +1414,15 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v10i64_to_v20i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -1416,12 +1442,15 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v18, vcc, 3, v18
; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
@@ -1442,37 +1471,35 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v10i64_to_v20i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB7_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: .LBB7_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
@@ -1498,6 +1525,7 @@ define inreg <20 x i32> @bitcast_v10i64_to_v20i32_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1672,6 +1700,7 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -1691,12 +1720,15 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
; SI-NEXT: v_add_i32_e32 v17, vcc, 3, v17
@@ -1717,16 +1749,15 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v20i32_to_v10f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -1746,12 +1777,15 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v19, vcc, 3, v19
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
; VI-NEXT: v_add_u32_e32 v17, vcc, 3, v17
@@ -1772,16 +1806,15 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v20i32_to_v10f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -1801,12 +1834,15 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
; GFX9-NEXT: v_add_u32_e32 v18, 3, v18
; GFX9-NEXT: v_add_u32_e32 v17, 3, v17
@@ -1827,37 +1863,35 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v20i32_to_v10f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
; GFX11-NEXT: v_add_nc_u32_e32 v18, 3, v18
; GFX11-NEXT: v_add_nc_u32_e32 v17, 3, v17
@@ -1878,6 +1912,7 @@ define inreg <10 x double> @bitcast_v20i32_to_v10f64_scalar(<20 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2012,6 +2047,7 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v12, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -2031,12 +2067,15 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -2047,16 +2086,15 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v10f64_to_v20i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v12, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -2076,12 +2114,15 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -2092,16 +2133,15 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v10f64_to_v20i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v12, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -2121,12 +2161,15 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -2137,37 +2180,35 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v10f64_to_v20i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB11_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: .LBB11_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -2178,6 +2219,7 @@ define inreg <20 x i32> @bitcast_v10f64_to_v20i32_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2838,13 +2880,14 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: v_readfirstlane_b32 s10, v2
; SI-NEXT: v_readfirstlane_b32 s9, v3
; SI-NEXT: v_readfirstlane_b32 s8, v4
; SI-NEXT: v_readfirstlane_b32 s7, v5
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -3072,23 +3115,26 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr13
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v20i32_to_v40i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: v_readfirstlane_b32 s9, v2
; VI-NEXT: v_readfirstlane_b32 s8, v3
-; VI-NEXT: v_readfirstlane_b32 s6, v4
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v5
+; VI-NEXT: v_readfirstlane_b32 s7, v4
+; VI-NEXT: v_readfirstlane_b32 s6, v5
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -3109,8 +3155,8 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: s_lshr_b32 s63, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -3129,8 +3175,8 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -3201,15 +3247,15 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s14, s14, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s13, s13, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_or_b32 s10, s10, s28
; VI-NEXT: s_or_b32 s9, s9, s15
; VI-NEXT: s_or_b32 s8, s8, s14
-; VI-NEXT: s_or_b32 s6, s6, s13
-; VI-NEXT: s_or_b32 s7, s7, s12
+; VI-NEXT: s_or_b32 s7, s7, s13
+; VI-NEXT: s_or_b32 s6, s6, s12
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -3228,8 +3274,8 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: v_mov_b32_e32 v16, s9
; VI-NEXT: v_mov_b32_e32 v17, s8
-; VI-NEXT: v_mov_b32_e32 v18, s6
-; VI-NEXT: v_mov_b32_e32 v19, s7
+; VI-NEXT: v_mov_b32_e32 v18, s7
+; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_4:
; VI-NEXT: ; implicit-def: $sgpr63
@@ -3252,27 +3298,30 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr14
; VI-NEXT: ; implicit-def: $sgpr13
; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v20i32_to_v40i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s6, v5
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -3289,12 +3338,12 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s11, s11, 3
; GFX9-NEXT: s_add_i32 s10, s10, 3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -3309,12 +3358,12 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -3344,12 +3393,12 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s44
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s43
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s12
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -3364,12 +3413,12 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: ; implicit-def: $sgpr63
@@ -3392,7 +3441,9 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr14
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v20i32_to_v40i16_scalar:
; GFX11: ; %bb.0:
@@ -3400,7 +3451,7 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-NEXT: s_mov_b32 s58, 0
+; GFX11-NEXT: s_mov_b32 s58, -1
; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -3424,8 +3475,7 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s47, s2, 16
; GFX11-NEXT: s_lshr_b32 s56, s1, 16
; GFX11-NEXT: s_lshr_b32 s57, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s4, s4, 3
; GFX11-NEXT: s_add_i32 s5, s5, 3
@@ -3521,7 +3571,9 @@ define inreg <40 x i16> @bitcast_v20i32_to_v40i16_scalar(<20 x i32> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4379,6 +4431,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
@@ -4402,7 +4455,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v50, v4
; SI-NEXT: v_mov_b32_e32 v51, v2
; SI-NEXT: v_mov_b32_e32 v52, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v3
@@ -4421,50 +4474,50 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52
; SI-NEXT: v_or_b32_e32 v7, v0, v57
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51
-; SI-NEXT: v_or_b32_e32 v8, v0, v56
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v9, v0, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v10, v0, v46
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v11, v0, v45
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v12, v0, v44
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v44
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v13, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v14, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v15, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v16, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v16, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v17, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v17, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51
; SI-NEXT: v_or_b32_e32 v18, v0, v54
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v56
; SI-NEXT: v_or_b32_e32 v19, v0, v53
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -4479,10 +4532,6 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v57, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v56, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v47, v0
@@ -4546,13 +4595,16 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v54, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -4561,6 +4613,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v53, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -4584,7 +4637,9 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v40i16_to_v20i32_scalar:
; VI: ; %bb.0:
@@ -4604,13 +4659,14 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB15_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -4748,21 +4804,22 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -4777,7 +4834,6 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -4797,17 +4853,13 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v40i16_to_v20i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -4823,13 +4875,19 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -4844,19 +4902,20 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB15_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -4874,18 +4933,24 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v19
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v49, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; GFX9-NEXT: v_lshl_or_b32 v0, v48, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v50, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v38, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -4900,17 +4965,13 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB15_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -4922,11 +4983,11 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -4936,15 +4997,14 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -4954,12 +5014,13 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -4973,8 +5034,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -5002,7 +5062,9 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-TRUE16-NEXT: s_branch .LBB15_3
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -5012,11 +5074,11 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -5026,15 +5088,14 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -5044,12 +5105,13 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -5063,8 +5125,7 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -5092,7 +5153,9 @@ define inreg <20 x i32> @bitcast_v40i16_to_v20i32_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-FAKE16-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5923,22 +5986,23 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: v_readfirstlane_b32 s10, v2
-; SI-NEXT: v_readfirstlane_b32 s8, v3
-; SI-NEXT: v_readfirstlane_b32 s7, v4
-; SI-NEXT: v_readfirstlane_b32 s6, v5
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v6
+; SI-NEXT: v_readfirstlane_b32 s9, v3
+; SI-NEXT: v_readfirstlane_b32 s8, v4
+; SI-NEXT: v_readfirstlane_b32 s7, v5
+; SI-NEXT: v_readfirstlane_b32 s6, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -5972,10 +6036,10 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v38, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v48, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s29
@@ -6010,10 +6074,10 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s29, s29, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s10, s10, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s12, s18, 16
@@ -6030,14 +6094,14 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s47, s29, 16
; SI-NEXT: s_lshr_b32 s56, s11, 16
; SI-NEXT: s_lshr_b32 s57, s10, 16
-; SI-NEXT: s_lshr_b32 s58, s8, 16
-; SI-NEXT: s_lshr_b32 s59, s7, 16
-; SI-NEXT: s_lshr_b32 s60, s6, 16
-; SI-NEXT: s_lshr_b32 s61, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: s_lshr_b32 s58, s9, 16
+; SI-NEXT: s_lshr_b32 s59, s8, 16
+; SI-NEXT: s_lshr_b32 s60, s7, 16
+; SI-NEXT: s_lshr_b32 s61, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s29
@@ -6256,23 +6320,26 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v20i32_to_v40f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: v_readfirstlane_b32 s9, v2
; VI-NEXT: v_readfirstlane_b32 s8, v3
-; VI-NEXT: v_readfirstlane_b32 s6, v4
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v5
+; VI-NEXT: v_readfirstlane_b32 s7, v4
+; VI-NEXT: v_readfirstlane_b32 s6, v5
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB17_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -6293,8 +6360,8 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; VI-NEXT: s_lshr_b32 s63, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -6313,8 +6380,8 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -6385,15 +6452,15 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s14, s14, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s13, s13, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_or_b32 s10, s10, s28
; VI-NEXT: s_or_b32 s9, s9, s15
; VI-NEXT: s_or_b32 s8, s8, s14
-; VI-NEXT: s_or_b32 s6, s6, s13
-; VI-NEXT: s_or_b32 s7, s7, s12
+; VI-NEXT: s_or_b32 s7, s7, s13
+; VI-NEXT: s_or_b32 s6, s6, s12
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -6412,8 +6479,8 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: v_mov_b32_e32 v16, s9
; VI-NEXT: v_mov_b32_e32 v17, s8
-; VI-NEXT: v_mov_b32_e32 v18, s6
-; VI-NEXT: v_mov_b32_e32 v19, s7
+; VI-NEXT: v_mov_b32_e32 v18, s7
+; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_4:
; VI-NEXT: ; implicit-def: $sgpr63
@@ -6436,27 +6503,30 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr14
; VI-NEXT: ; implicit-def: $sgpr13
; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB17_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB17_2
+; VI-NEXT: s_branch .LBB17_3
;
; GFX9-LABEL: bitcast_v20i32_to_v40f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s6, v5
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -6473,12 +6543,12 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s11, s11, 3
; GFX9-NEXT: s_add_i32 s10, s10, 3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -6493,12 +6563,12 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -6528,12 +6598,12 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s44
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s43
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s12
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -6548,12 +6618,12 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: ; implicit-def: $sgpr63
@@ -6576,7 +6646,9 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr14
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB17_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB17_2
+; GFX9-NEXT: s_branch .LBB17_3
;
; GFX11-LABEL: bitcast_v20i32_to_v40f16_scalar:
; GFX11: ; %bb.0:
@@ -6584,7 +6656,7 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-NEXT: s_mov_b32 s58, 0
+; GFX11-NEXT: s_mov_b32 s58, -1
; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -6608,8 +6680,7 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s47, s2, 16
; GFX11-NEXT: s_lshr_b32 s56, s1, 16
; GFX11-NEXT: s_lshr_b32 s57, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
+; GFX11-NEXT: s_cbranch_execnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s4, s4, 3
; GFX11-NEXT: s_add_i32 s5, s5, 3
@@ -6705,7 +6776,9 @@ define inreg <40 x half> @bitcast_v20i32_to_v40f16_scalar(<20 x i32> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB17_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
+; GFX11-NEXT: s_cbranch_vccz .LBB17_2
+; GFX11-NEXT: s_branch .LBB17_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7737,6 +7810,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v24, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v58
@@ -8044,7 +8118,9 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v53, v46
; SI-NEXT: v_mov_b32_e32 v27, v52
; SI-NEXT: v_mov_b32_e32 v52, v45
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v40f16_to_v20i32_scalar:
; VI: ; %bb.0:
@@ -8064,13 +8140,14 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB19_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -8224,17 +8301,13 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB19_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB19_2
+; VI-NEXT: s_branch .LBB19_3
;
; GFX9-LABEL: bitcast_v40f16_to_v20i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -8250,13 +8323,19 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -8271,19 +8350,20 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -8308,8 +8388,8 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
@@ -8339,7 +8419,9 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB19_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB19_2
+; GFX9-NEXT: s_branch .LBB19_3
;
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -8351,11 +8433,11 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -8365,15 +8447,14 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -8383,12 +8464,13 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -8402,8 +8484,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -8431,7 +8512,9 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-TRUE16-NEXT: s_branch .LBB19_3
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -8441,11 +8524,11 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -8455,15 +8538,14 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -8473,12 +8555,13 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -8492,8 +8575,7 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -8521,7 +8603,9 @@ define inreg <20 x i32> @bitcast_v40f16_to_v20i32_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB19_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB19_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-FAKE16-NEXT: s_branch .LBB19_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8685,6 +8769,7 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -8704,12 +8789,15 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v19, 1.0, v19
; SI-NEXT: v_add_f32_e32 v18, 1.0, v18
; SI-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -8730,16 +8818,15 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v20f32_to_v10i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -8759,12 +8846,15 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -8785,16 +8875,15 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v20f32_to_v10i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -8814,12 +8903,15 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -8840,37 +8932,35 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v20f32_to_v10i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB21_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: .LBB21_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
@@ -8881,6 +8971,7 @@ define inreg <10 x i64> @bitcast_v20f32_to_v10i64_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9060,6 +9151,7 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -9079,12 +9171,15 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
; SI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; SI-NEXT: v_add_i32_e32 v16, vcc, 3, v16
@@ -9105,16 +9200,15 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v10i64_to_v20f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -9134,12 +9228,15 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
; VI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; VI-NEXT: v_add_u32_e32 v16, vcc, 3, v16
@@ -9160,16 +9257,15 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v10i64_to_v20f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -9189,12 +9285,15 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v18, vcc, 3, v18
; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
; GFX9-NEXT: v_add_co_u32_e32 v16, vcc, 3, v16
@@ -9215,37 +9314,35 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v10i64_to_v20f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB23_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: .LBB23_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
@@ -9271,6 +9368,7 @@ define inreg <20 x float> @bitcast_v10i64_to_v20f32_scalar(<10 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9435,6 +9533,7 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -9454,12 +9553,15 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_3
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v19, 1.0, v19
; SI-NEXT: v_add_f32_e32 v18, 1.0, v18
; SI-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -9480,16 +9582,15 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB25_3: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v20f32_to_v10f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -9509,12 +9610,15 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -9535,16 +9639,15 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v20f32_to_v10f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -9564,12 +9667,15 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
@@ -9590,37 +9696,35 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v20f32_to_v10f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB25_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: .LBB25_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
@@ -9631,6 +9735,7 @@ define inreg <10 x double> @bitcast_v20f32_to_v10f64_scalar(<20 x float> inreg %
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9765,6 +9870,7 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v12, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -9784,12 +9890,15 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -9800,16 +9909,15 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v10f64_to_v20f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v12, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -9829,12 +9937,15 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -9845,16 +9956,15 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v10f64_to_v20f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v12, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -9874,12 +9984,15 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -9890,37 +10003,35 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v10f64_to_v20f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB27_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: .LBB27_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
@@ -9931,6 +10042,7 @@ define inreg <20 x float> @bitcast_v10f64_to_v20f32_scalar(<10 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10571,6 +10683,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, s16
; SI-NEXT: v_mov_b32_e32 v20, s17
; SI-NEXT: v_mov_b32_e32 v19, s18
@@ -10584,7 +10697,7 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v9, s27
; SI-NEXT: v_mov_b32_e32 v8, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v7, s29
; SI-NEXT: s_cbranch_scc0 .LBB29_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -10792,12 +10905,15 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr18
; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v20f32_to_v40i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v20, s16
; VI-NEXT: v_mov_b32_e32 v19, s17
; VI-NEXT: v_mov_b32_e32 v18, s18
@@ -10811,8 +10927,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB29_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -10946,12 +11062,15 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr28
; VI-NEXT: ; implicit-def: $vgpr27
; VI-NEXT: ; implicit-def: $vgpr26
-; VI-NEXT: s_branch .LBB29_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB29_2
+; VI-NEXT: s_branch .LBB29_3
;
; GFX9-LABEL: bitcast_v20f32_to_v40i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v20, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v18, s18
@@ -10965,8 +11084,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB29_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -11100,7 +11219,9 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr28
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: ; implicit-def: $vgpr26
-; GFX9-NEXT: s_branch .LBB29_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB29_2
+; GFX9-NEXT: s_branch .LBB29_3
;
; GFX11-LABEL: bitcast_v20f32_to_v40i16_scalar:
; GFX11: ; %bb.0:
@@ -11113,17 +11234,17 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
; GFX11-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
@@ -11139,12 +11260,11 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-NEXT: s_cbranch_execnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
@@ -11154,9 +11274,9 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
@@ -11199,9 +11319,9 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
; GFX11-NEXT: v_mov_b32_e32 v1, v21
@@ -11210,8 +11330,8 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v4, v37, 16, v48
; GFX11-NEXT: v_lshl_or_b32 v7, v34, 16, v7
; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
; GFX11-NEXT: v_mov_b32_e32 v0, v20
@@ -11237,7 +11357,9 @@ define inreg <40 x i16> @bitcast_v20f32_to_v40i16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr24
; GFX11-NEXT: ; implicit-def: $vgpr23
; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB29_2
+; GFX11-NEXT: s_branch .LBB29_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12095,6 +12217,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
@@ -12118,7 +12241,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v50, v4
; SI-NEXT: v_mov_b32_e32 v51, v2
; SI-NEXT: v_mov_b32_e32 v52, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v3
@@ -12137,50 +12260,50 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52
; SI-NEXT: v_or_b32_e32 v7, v0, v57
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51
-; SI-NEXT: v_or_b32_e32 v8, v0, v56
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v9, v0, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v10, v0, v46
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v11, v0, v45
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v12, v0, v44
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v44
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v13, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v14, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v15, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v16, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v16, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v17, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v17, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51
; SI-NEXT: v_or_b32_e32 v18, v0, v54
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v56
; SI-NEXT: v_or_b32_e32 v19, v0, v53
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -12195,10 +12318,6 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v57, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v56, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v47, v0
@@ -12262,13 +12381,16 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v54, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -12277,6 +12399,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v53, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -12300,7 +12423,9 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v40i16_to_v20f32_scalar:
; VI: ; %bb.0:
@@ -12320,13 +12445,14 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB31_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -12464,21 +12590,22 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -12493,7 +12620,6 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -12513,17 +12639,13 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB31_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB31_2
+; VI-NEXT: s_branch .LBB31_3
;
; GFX9-LABEL: bitcast_v40i16_to_v20f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -12539,13 +12661,19 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -12560,19 +12688,20 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -12590,18 +12719,24 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v19
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v49, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; GFX9-NEXT: v_lshl_or_b32 v0, v48, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v50, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v38, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -12616,17 +12751,13 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB31_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB31_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB31_2
+; GFX9-NEXT: s_branch .LBB31_3
;
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v20f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -12638,11 +12769,11 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -12652,15 +12783,14 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -12670,12 +12800,13 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -12689,8 +12820,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -12718,7 +12848,9 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB31_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-TRUE16-NEXT: s_branch .LBB31_3
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v20f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -12728,11 +12860,11 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -12742,15 +12874,14 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -12760,12 +12891,13 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -12779,8 +12911,7 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-FAKE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -12808,7 +12939,9 @@ define inreg <20 x float> @bitcast_v40i16_to_v20f32_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB31_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB31_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-FAKE16-NEXT: s_branch .LBB31_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13619,25 +13752,26 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: v_readfirstlane_b32 s10, v2
-; SI-NEXT: v_readfirstlane_b32 s8, v3
-; SI-NEXT: v_readfirstlane_b32 s7, v4
-; SI-NEXT: v_readfirstlane_b32 s6, v5
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v6
+; SI-NEXT: v_readfirstlane_b32 s9, v3
+; SI-NEXT: v_readfirstlane_b32 s8, v4
+; SI-NEXT: v_readfirstlane_b32 s7, v5
+; SI-NEXT: v_readfirstlane_b32 s6, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -13671,10 +13805,10 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v38, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v48, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s29
@@ -13709,10 +13843,10 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v15, s29, 1.0
; SI-NEXT: v_add_f32_e64 v13, s11, 1.0
; SI-NEXT: v_add_f32_e64 v11, s10, 1.0
-; SI-NEXT: v_add_f32_e64 v9, s8, 1.0
-; SI-NEXT: v_add_f32_e64 v7, s7, 1.0
-; SI-NEXT: v_add_f32_e64 v5, s6, 1.0
-; SI-NEXT: v_add_f32_e64 v3, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v9, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v7, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v5, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v3, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v38, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v36, 16, v4
@@ -13961,12 +14095,15 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v20f32_to_v40f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v20, s16
; VI-NEXT: v_mov_b32_e32 v19, s17
; VI-NEXT: v_mov_b32_e32 v18, s18
@@ -13980,8 +14117,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB33_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -14115,12 +14252,15 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr28
; VI-NEXT: ; implicit-def: $vgpr27
; VI-NEXT: ; implicit-def: $vgpr26
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
+; VI-NEXT: s_branch .LBB33_3
;
; GFX9-LABEL: bitcast_v20f32_to_v40f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v20, s16
; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v18, s18
@@ -14134,8 +14274,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB33_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -14269,7 +14409,9 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr28
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: ; implicit-def: $vgpr26
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
+; GFX9-NEXT: s_branch .LBB33_3
;
; GFX11-LABEL: bitcast_v20f32_to_v40f16_scalar:
; GFX11: ; %bb.0:
@@ -14282,17 +14424,17 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v6, s20 :: v_dual_mov_b32 v5, s21
; GFX11-NEXT: v_dual_mov_b32 v14, s22 :: v_dual_mov_b32 v13, s23
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v11, s25
-; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v10, s26 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
@@ -14308,12 +14450,11 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v18
; GFX11-NEXT: v_lshrrev_b32_e32 v21, 16, v19
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-NEXT: s_cbranch_execnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v6, 1.0, v6
@@ -14323,9 +14464,9 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v23, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v27, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v12
@@ -14368,9 +14509,9 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v11, v30, 16, v13
; GFX11-NEXT: v_lshl_or_b32 v13, v28, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v14, v27, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v19, v22, 16, v1
; GFX11-NEXT: v_mov_b32_e32 v1, v21
@@ -14379,8 +14520,8 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v4, v37, 16, v48
; GFX11-NEXT: v_lshl_or_b32 v7, v34, 16, v7
; GFX11-NEXT: v_lshl_or_b32 v12, v29, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v15, v26, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v16, v25, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v17, v24, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v23, 16, v0
; GFX11-NEXT: v_mov_b32_e32 v0, v20
@@ -14406,7 +14547,9 @@ define inreg <40 x half> @bitcast_v20f32_to_v40f16_scalar(<20 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr24
; GFX11-NEXT: ; implicit-def: $vgpr23
; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
+; GFX11-NEXT: s_branch .LBB33_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15438,6 +15581,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v24, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v58
@@ -15745,7 +15889,9 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v53, v46
; SI-NEXT: v_mov_b32_e32 v27, v52
; SI-NEXT: v_mov_b32_e32 v52, v45
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v40f16_to_v20f32_scalar:
; VI: ; %bb.0:
@@ -15765,13 +15911,14 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB35_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -15925,17 +16072,13 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v40f16_to_v20f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -15951,13 +16094,19 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -15972,19 +16121,20 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB35_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -16009,8 +16159,8 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
@@ -16040,7 +16190,9 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v20f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -16052,11 +16204,11 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -16066,15 +16218,14 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -16084,12 +16235,13 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -16103,8 +16255,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -16132,7 +16283,9 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-TRUE16-NEXT: s_branch .LBB35_3
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v20f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -16142,11 +16295,11 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -16156,15 +16309,14 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -16174,12 +16326,13 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -16193,8 +16346,7 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -16222,7 +16374,9 @@ define inreg <20 x float> @bitcast_v40f16_to_v20f32_scalar(<40 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB35_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-FAKE16-NEXT: s_branch .LBB35_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16401,6 +16555,7 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v13, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -16420,12 +16575,15 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -16446,16 +16604,15 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
; SI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v10i64_to_v10f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v13, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -16475,12 +16632,15 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -16501,16 +16661,15 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v17, vcc, 0, v17, vcc
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
; VI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v10i64_to_v10f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v13, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -16530,12 +16689,15 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -16556,37 +16718,35 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v17, vcc, 0, v17, vcc
; GFX9-NEXT: v_add_co_u32_e32 v18, vcc, 3, v18
; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v10i64_to_v10f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB37_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: .LBB37_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -16612,6 +16772,7 @@ define inreg <10 x double> @bitcast_v10i64_to_v10f64_scalar(<10 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v17, null, 0, v17, vcc_lo
; GFX11-NEXT: v_add_co_u32 v18, vcc_lo, v18, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v19, null, 0, v19, vcc_lo
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16746,6 +16907,7 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v12, v6
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, v5
; SI-NEXT: v_mov_b32_e32 v18, v4
; SI-NEXT: v_mov_b32_e32 v17, v3
@@ -16765,12 +16927,15 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -16781,16 +16946,15 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; SI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v10f64_to_v10i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v12, v6
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, v5
; VI-NEXT: v_mov_b32_e32 v18, v4
; VI-NEXT: v_mov_b32_e32 v17, v3
@@ -16810,12 +16974,15 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -16826,16 +16993,15 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; VI-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v10f64_to_v10i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v12, v6
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, v5
; GFX9-NEXT: v_mov_b32_e32 v18, v4
; GFX9-NEXT: v_mov_b32_e32 v17, v3
@@ -16855,12 +17021,15 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -16871,37 +17040,35 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v10f64_to_v10i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
+; GFX11-NEXT: v_dual_mov_b32 v15, v2 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB39_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: .LBB39_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -16912,6 +17079,7 @@ define inreg <10 x i64> @bitcast_v10f64_to_v10i64_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[14:15], v[14:15], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17582,13 +17750,14 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s11, v1
; SI-NEXT: v_readfirstlane_b32 s10, v2
; SI-NEXT: v_readfirstlane_b32 s9, v3
; SI-NEXT: v_readfirstlane_b32 s8, v4
; SI-NEXT: v_readfirstlane_b32 s7, v5
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -17816,23 +17985,26 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr13
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr12
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v10i64_to_v40i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: v_readfirstlane_b32 s9, v2
; VI-NEXT: v_readfirstlane_b32 s8, v3
-; VI-NEXT: v_readfirstlane_b32 s6, v4
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v5
+; VI-NEXT: v_readfirstlane_b32 s7, v4
+; VI-NEXT: v_readfirstlane_b32 s6, v5
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -17853,8 +18025,8 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: s_lshr_b32 s63, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -17873,8 +18045,8 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -17945,15 +18117,15 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s14, s14, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s13, s13, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_or_b32 s10, s10, s28
; VI-NEXT: s_or_b32 s9, s9, s15
; VI-NEXT: s_or_b32 s8, s8, s14
-; VI-NEXT: s_or_b32 s6, s6, s13
-; VI-NEXT: s_or_b32 s7, s7, s12
+; VI-NEXT: s_or_b32 s7, s7, s13
+; VI-NEXT: s_or_b32 s6, s6, s12
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -17972,8 +18144,8 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: v_mov_b32_e32 v16, s9
; VI-NEXT: v_mov_b32_e32 v17, s8
-; VI-NEXT: v_mov_b32_e32 v18, s6
-; VI-NEXT: v_mov_b32_e32 v19, s7
+; VI-NEXT: v_mov_b32_e32 v18, s7
+; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_4:
; VI-NEXT: ; implicit-def: $sgpr63
@@ -17996,27 +18168,30 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr14
; VI-NEXT: ; implicit-def: $sgpr13
; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v10i64_to_v40i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s6, v5
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -18033,12 +18208,12 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -18053,12 +18228,12 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -18088,12 +18263,12 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s44
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s43
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s12
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -18108,12 +18283,12 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: ; implicit-def: $sgpr63
@@ -18136,7 +18311,9 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr14
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v10i64_to_v40i16_scalar:
; GFX11: ; %bb.0:
@@ -18144,7 +18321,7 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-NEXT: s_mov_b32 s58, 0
+; GFX11-NEXT: s_mov_b32 s58, -1
; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -18168,8 +18345,7 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s47, s2, 16
; GFX11-NEXT: s_lshr_b32 s56, s1, 16
; GFX11-NEXT: s_lshr_b32 s57, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s5, s5, 3
; GFX11-NEXT: s_addc_u32 s4, s4, 0
@@ -18265,7 +18441,9 @@ define inreg <40 x i16> @bitcast_v10i64_to_v40i16_scalar(<10 x i64> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19123,6 +19301,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
@@ -19146,7 +19325,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v50, v4
; SI-NEXT: v_mov_b32_e32 v51, v2
; SI-NEXT: v_mov_b32_e32 v52, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v3
@@ -19165,50 +19344,50 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52
; SI-NEXT: v_or_b32_e32 v7, v0, v57
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51
-; SI-NEXT: v_or_b32_e32 v8, v0, v56
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v9, v0, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v10, v0, v46
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v11, v0, v45
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v12, v0, v44
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v44
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v13, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v14, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v15, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v16, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v16, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v17, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v17, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51
; SI-NEXT: v_or_b32_e32 v18, v0, v54
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v56
; SI-NEXT: v_or_b32_e32 v19, v0, v53
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -19223,10 +19402,6 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v57, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v56, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v47, v0
@@ -19290,13 +19465,16 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v54, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -19305,6 +19483,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v53, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -19328,7 +19507,9 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v40i16_to_v10i64_scalar:
; VI: ; %bb.0:
@@ -19348,13 +19529,14 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -19492,21 +19674,22 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -19521,7 +19704,6 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -19541,17 +19723,13 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v40i16_to_v10i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -19567,13 +19745,19 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -19588,19 +19772,20 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -19618,18 +19803,24 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v19
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v49, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; GFX9-NEXT: v_lshl_or_b32 v0, v48, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v50, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v38, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -19644,17 +19835,13 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB43_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -19666,11 +19853,11 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -19680,15 +19867,14 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -19698,12 +19884,13 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -19717,8 +19904,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -19746,7 +19932,9 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-TRUE16-NEXT: s_branch .LBB43_3
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -19756,11 +19944,11 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -19770,15 +19958,14 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -19788,12 +19975,13 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -19807,8 +19995,7 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -19836,7 +20023,9 @@ define inreg <10 x i64> @bitcast_v40i16_to_v10i64_scalar(<40 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB43_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB43_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-FAKE16-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20677,22 +20866,23 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: v_readfirstlane_b32 s11, v2
-; SI-NEXT: v_readfirstlane_b32 s7, v3
-; SI-NEXT: v_readfirstlane_b32 s8, v4
+; SI-NEXT: v_readfirstlane_b32 s8, v3
+; SI-NEXT: v_readfirstlane_b32 s9, v4
; SI-NEXT: v_readfirstlane_b32 s6, v5
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v6
+; SI-NEXT: v_readfirstlane_b32 s7, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s11, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -20726,10 +20916,10 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v38, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v48, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s7
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s29
@@ -20780,18 +20970,18 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; SI-NEXT: s_addc_u32 s11, s11, 0
; SI-NEXT: s_lshr_b32 s56, s10, 16
; SI-NEXT: s_lshr_b32 s57, s11, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s58, s7, 16
-; SI-NEXT: s_lshr_b32 s59, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: s_lshr_b32 s58, s8, 16
+; SI-NEXT: s_lshr_b32 s59, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: s_lshr_b32 s60, s6, 16
-; SI-NEXT: s_lshr_b32 s61, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
+; SI-NEXT: s_lshr_b32 s61, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s7
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s29
@@ -21010,23 +21200,26 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v10i64_to_v40f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s11, v0
; VI-NEXT: v_readfirstlane_b32 s10, v1
; VI-NEXT: v_readfirstlane_b32 s9, v2
; VI-NEXT: v_readfirstlane_b32 s8, v3
-; VI-NEXT: v_readfirstlane_b32 s6, v4
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v5
+; VI-NEXT: v_readfirstlane_b32 s7, v4
+; VI-NEXT: v_readfirstlane_b32 s6, v5
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -21047,8 +21240,8 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; VI-NEXT: s_lshr_b32 s63, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -21067,8 +21260,8 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s12, s7, 16
-; VI-NEXT: s_lshr_b32 s13, s6, 16
+; VI-NEXT: s_lshr_b32 s12, s6, 16
+; VI-NEXT: s_lshr_b32 s13, s7, 16
; VI-NEXT: s_lshr_b32 s14, s8, 16
; VI-NEXT: s_lshr_b32 s15, s9, 16
; VI-NEXT: s_lshr_b32 s40, s10, 16
@@ -21139,15 +21332,15 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s14, s14, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s13, s13, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_or_b32 s10, s10, s28
; VI-NEXT: s_or_b32 s9, s9, s15
; VI-NEXT: s_or_b32 s8, s8, s14
-; VI-NEXT: s_or_b32 s6, s6, s13
-; VI-NEXT: s_or_b32 s7, s7, s12
+; VI-NEXT: s_or_b32 s7, s7, s13
+; VI-NEXT: s_or_b32 s6, s6, s12
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -21166,8 +21359,8 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v15, s10
; VI-NEXT: v_mov_b32_e32 v16, s9
; VI-NEXT: v_mov_b32_e32 v17, s8
-; VI-NEXT: v_mov_b32_e32 v18, s6
-; VI-NEXT: v_mov_b32_e32 v19, s7
+; VI-NEXT: v_mov_b32_e32 v18, s7
+; VI-NEXT: v_mov_b32_e32 v19, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_4:
; VI-NEXT: ; implicit-def: $sgpr63
@@ -21190,27 +21383,30 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr14
; VI-NEXT: ; implicit-def: $sgpr13
; VI-NEXT: ; implicit-def: $sgpr12
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v10i64_to_v40f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s6, v5
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -21227,12 +21423,12 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s63, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -21247,12 +21443,12 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s12, s11, 16
-; GFX9-NEXT: s_lshr_b32 s13, s10, 16
-; GFX9-NEXT: s_lshr_b32 s14, s9, 16
-; GFX9-NEXT: s_lshr_b32 s15, s8, 16
-; GFX9-NEXT: s_lshr_b32 s40, s7, 16
-; GFX9-NEXT: s_lshr_b32 s41, s6, 16
+; GFX9-NEXT: s_lshr_b32 s12, s6, 16
+; GFX9-NEXT: s_lshr_b32 s13, s11, 16
+; GFX9-NEXT: s_lshr_b32 s14, s10, 16
+; GFX9-NEXT: s_lshr_b32 s15, s9, 16
+; GFX9-NEXT: s_lshr_b32 s40, s8, 16
+; GFX9-NEXT: s_lshr_b32 s41, s7, 16
; GFX9-NEXT: s_lshr_b32 s42, s29, 16
; GFX9-NEXT: s_lshr_b32 s43, s28, 16
; GFX9-NEXT: s_lshr_b32 s44, s27, 16
@@ -21282,12 +21478,12 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s44
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s43
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s14
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s13
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s12
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s13
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s12
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -21302,12 +21498,12 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: ; implicit-def: $sgpr63
@@ -21330,7 +21526,9 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr14
; GFX9-NEXT: ; implicit-def: $sgpr13
; GFX9-NEXT: ; implicit-def: $sgpr12
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v10i64_to_v40f16_scalar:
; GFX11: ; %bb.0:
@@ -21338,7 +21536,7 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v2
; GFX11-NEXT: v_readfirstlane_b32 s5, v0
; GFX11-NEXT: v_readfirstlane_b32 s4, v1
-; GFX11-NEXT: s_mov_b32 s58, 0
+; GFX11-NEXT: s_mov_b32 s58, -1
; GFX11-NEXT: s_and_b32 s6, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
@@ -21362,8 +21560,7 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s47, s2, 16
; GFX11-NEXT: s_lshr_b32 s56, s1, 16
; GFX11-NEXT: s_lshr_b32 s57, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s5, s5, 3
; GFX11-NEXT: s_addc_u32 s4, s4, 0
@@ -21459,7 +21656,9 @@ define inreg <40 x half> @bitcast_v10i64_to_v40f16_scalar(<10 x i64> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr7
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s58
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22491,6 +22690,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v24, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v58
@@ -22798,7 +22998,9 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v53, v46
; SI-NEXT: v_mov_b32_e32 v27, v52
; SI-NEXT: v_mov_b32_e32 v52, v45
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v40f16_to_v10i64_scalar:
; VI: ; %bb.0:
@@ -22818,13 +23020,14 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -22978,17 +23181,13 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v40f16_to_v10i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -23004,13 +23203,19 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -23025,19 +23230,20 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB47_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -23062,8 +23268,8 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
@@ -23093,7 +23299,9 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
+; GFX9-NEXT: s_branch .LBB47_3
;
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -23105,11 +23313,11 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -23119,15 +23327,14 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -23137,12 +23344,13 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -23156,8 +23364,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -23185,7 +23392,9 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-TRUE16-NEXT: s_branch .LBB47_3
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -23195,11 +23404,11 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -23209,15 +23418,14 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -23227,12 +23435,13 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -23246,8 +23455,7 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -23275,7 +23483,9 @@ define inreg <10 x i64> @bitcast_v40f16_to_v10i64_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-FAKE16-NEXT: s_branch .LBB47_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23885,6 +24095,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v19, s16
; SI-NEXT: v_mov_b32_e32 v20, s17
; SI-NEXT: v_mov_b32_e32 v17, s18
@@ -23897,9 +24108,9 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v12, s25
; SI-NEXT: v_mov_b32_e32 v9, s26
; SI-NEXT: v_mov_b32_e32 v10, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v7, s28
; SI-NEXT: v_mov_b32_e32 v8, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_alignbit_b32 v21, v6, v5, 16
@@ -24096,12 +24307,15 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr25
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v10f64_to_v40i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v20, s16
; VI-NEXT: v_mov_b32_e32 v21, s17
; VI-NEXT: v_mov_b32_e32 v18, s18
@@ -24115,8 +24329,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -24240,12 +24454,15 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr28
; VI-NEXT: ; implicit-def: $vgpr27
; VI-NEXT: ; implicit-def: $vgpr26
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v10f64_to_v40i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v20, s16
; GFX9-NEXT: v_mov_b32_e32 v21, s17
; GFX9-NEXT: v_mov_b32_e32 v18, s18
@@ -24259,8 +24476,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -24384,7 +24601,9 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr28
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: ; implicit-def: $vgpr26
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
+; GFX9-NEXT: s_branch .LBB49_3
;
; GFX11-LABEL: bitcast_v10f64_to_v40i16_scalar:
; GFX11: ; %bb.0:
@@ -24399,8 +24618,8 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
@@ -24423,8 +24642,7 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-NEXT: s_cbranch_execnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -24521,7 +24739,9 @@ define inreg <40 x i16> @bitcast_v10f64_to_v40i16_scalar(<10 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr24
; GFX11-NEXT: ; implicit-def: $vgpr23
; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
+; GFX11-NEXT: s_branch .LBB49_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25379,6 +25599,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
@@ -25402,7 +25623,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v50, v4
; SI-NEXT: v_mov_b32_e32 v51, v2
; SI-NEXT: v_mov_b32_e32 v52, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v3
@@ -25421,50 +25642,50 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v52
; SI-NEXT: v_or_b32_e32 v7, v0, v57
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v51
-; SI-NEXT: v_or_b32_e32 v8, v0, v56
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v50
; SI-NEXT: v_or_b32_e32 v9, v0, v47
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v49
; SI-NEXT: v_or_b32_e32 v10, v0, v46
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; SI-NEXT: s_and_b32 s4, s16, 0xffff
-; SI-NEXT: s_lshl_b32 s5, s17, 16
; SI-NEXT: v_or_b32_e32 v11, v0, v45
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; SI-NEXT: s_and_b32 s4, s16, 0xffff
+; SI-NEXT: s_lshl_b32 s5, s17, 16
+; SI-NEXT: v_or_b32_e32 v12, v0, v44
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
; SI-NEXT: s_or_b32 s4, s4, s5
; SI-NEXT: s_and_b32 s5, s18, 0xffff
; SI-NEXT: s_lshl_b32 s6, s19, 16
-; SI-NEXT: v_or_b32_e32 v12, v0, v44
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v38
+; SI-NEXT: v_or_b32_e32 v13, v0, v43
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
; SI-NEXT: s_or_b32 s5, s5, s6
; SI-NEXT: s_and_b32 s6, s20, 0xffff
; SI-NEXT: s_lshl_b32 s7, s21, 16
-; SI-NEXT: v_or_b32_e32 v13, v0, v43
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; SI-NEXT: v_or_b32_e32 v14, v0, v42
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
; SI-NEXT: s_or_b32 s6, s6, s7
; SI-NEXT: s_and_b32 s7, s22, 0xffff
; SI-NEXT: s_lshl_b32 s8, s23, 16
-; SI-NEXT: v_or_b32_e32 v14, v0, v42
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; SI-NEXT: v_or_b32_e32 v15, v0, v41
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
; SI-NEXT: s_or_b32 s7, s7, s8
; SI-NEXT: s_and_b32 s8, s24, 0xffff
; SI-NEXT: s_lshl_b32 s9, s25, 16
-; SI-NEXT: v_or_b32_e32 v15, v0, v41
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; SI-NEXT: v_or_b32_e32 v16, v0, v40
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
; SI-NEXT: s_or_b32 s8, s8, s9
; SI-NEXT: s_and_b32 s9, s26, 0xffff
; SI-NEXT: s_lshl_b32 s10, s27, 16
-; SI-NEXT: v_or_b32_e32 v16, v0, v40
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; SI-NEXT: v_or_b32_e32 v17, v0, v55
+; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
; SI-NEXT: s_or_b32 s9, s9, s10
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
-; SI-NEXT: v_or_b32_e32 v17, v0, v55
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v33
-; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v51
; SI-NEXT: v_or_b32_e32 v18, v0, v54
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; SI-NEXT: s_or_b32 s10, s10, s11
+; SI-NEXT: v_or_b32_e32 v8, v1, v56
; SI-NEXT: v_or_b32_e32 v19, v0, v53
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -25479,10 +25700,6 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v57, v0
; SI-NEXT: v_add_i32_e32 v7, vcc, 0x30000, v0
-; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v51
-; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; SI-NEXT: v_or_b32_e32 v0, v56, v0
-; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v50
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
; SI-NEXT: v_or_b32_e32 v0, v47, v0
@@ -25546,13 +25763,16 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_lshl_b32 s10, s27, 16
; SI-NEXT: s_add_i32 s28, s28, 3
; SI-NEXT: v_or_b32_e32 v0, v54, v0
+; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v51
; SI-NEXT: s_or_b32 s9, s10, s9
; SI-NEXT: s_and_b32 s10, s28, 0xffff
; SI-NEXT: s_lshl_b32 s11, s29, 16
; SI-NEXT: v_add_i32_e32 v18, vcc, 0x30000, v0
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v32
+; SI-NEXT: v_and_b32_e32 v1, 0xffff, v1
; SI-NEXT: s_or_b32 s10, s11, s10
; SI-NEXT: v_and_b32_e32 v0, 0xffff, v0
+; SI-NEXT: v_or_b32_e32 v1, v56, v1
; SI-NEXT: s_add_i32 s4, s4, 0x30000
; SI-NEXT: s_add_i32 s5, s5, 0x30000
; SI-NEXT: s_add_i32 s6, s6, 0x30000
@@ -25561,6 +25781,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_add_i32 s9, s9, 0x30000
; SI-NEXT: s_add_i32 s10, s10, 0x30000
; SI-NEXT: v_or_b32_e32 v0, v53, v0
+; SI-NEXT: v_add_i32_e32 v8, vcc, 0x30000, v1
; SI-NEXT: v_add_i32_e32 v19, vcc, 0x30000, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: v_mov_b32_e32 v1, s5
@@ -25584,7 +25805,9 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v40i16_to_v10f64_scalar:
; VI: ; %bb.0:
@@ -25604,13 +25827,14 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -25748,21 +25972,22 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -25777,7 +26002,6 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -25797,17 +26021,13 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v40i16_to_v10f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -25823,13 +26043,19 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -25844,19 +26070,20 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -25874,18 +26101,24 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v34
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v19
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v49, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
+; GFX9-NEXT: v_lshl_or_b32 v0, v48, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_lshl_or_b32 v0, v39, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v1, v50, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v38, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -25900,17 +26133,13 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-TRUE16-LABEL: bitcast_v40i16_to_v10f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -25922,11 +26151,11 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -25936,15 +26165,14 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -25954,12 +26182,13 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -25973,8 +26202,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -26002,7 +26230,9 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-TRUE16-NEXT: s_branch .LBB51_3
;
; GFX11-FAKE16-LABEL: bitcast_v40i16_to_v10f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -26012,11 +26242,11 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -26026,15 +26256,14 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -26044,12 +26273,13 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -26063,8 +26293,7 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -26092,7 +26321,9 @@ define inreg <10 x double> @bitcast_v40i16_to_v10f64_scalar(<40 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-FAKE16-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -26863,13 +27094,14 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v7
+; SI-NEXT: s_and_b64 s[10:11], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s8, v1
; SI-NEXT: v_readfirstlane_b32 s9, v2
; SI-NEXT: v_readfirstlane_b32 s6, v3
; SI-NEXT: v_readfirstlane_b32 s7, v4
; SI-NEXT: v_readfirstlane_b32 s4, v5
-; SI-NEXT: s_and_b64 s[10:11], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v6
+; SI-NEXT: s_mov_b64 s[10:11], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -27198,12 +27430,15 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v10f64_to_v40f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v20, s16
; VI-NEXT: v_mov_b32_e32 v21, s17
; VI-NEXT: v_mov_b32_e32 v18, s18
@@ -27217,8 +27452,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB53_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -27342,12 +27577,15 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; VI-NEXT: ; implicit-def: $vgpr28
; VI-NEXT: ; implicit-def: $vgpr27
; VI-NEXT: ; implicit-def: $vgpr26
-; VI-NEXT: s_branch .LBB53_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB53_2
+; VI-NEXT: s_branch .LBB53_3
;
; GFX9-LABEL: bitcast_v10f64_to_v40f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v20, s16
; GFX9-NEXT: v_mov_b32_e32 v21, s17
; GFX9-NEXT: v_mov_b32_e32 v18, s18
@@ -27361,8 +27599,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v5
@@ -27486,7 +27724,9 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr28
; GFX9-NEXT: ; implicit-def: $vgpr27
; GFX9-NEXT: ; implicit-def: $vgpr26
-; GFX9-NEXT: s_branch .LBB53_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB53_2
+; GFX9-NEXT: s_branch .LBB53_3
;
; GFX11-LABEL: bitcast_v10f64_to_v40f16_scalar:
; GFX11: ; %bb.0:
@@ -27501,8 +27741,8 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v11, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v22, 16, v1
@@ -27525,8 +27765,7 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v2, 16, v3
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v20
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-NEXT: s_cbranch_execnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -27623,7 +27862,9 @@ define inreg <40 x half> @bitcast_v10f64_to_v40f16_scalar(<10 x double> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr24
; GFX11-NEXT: ; implicit-def: $vgpr23
; GFX11-NEXT: ; implicit-def: $vgpr22
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB53_2
+; GFX11-NEXT: s_branch .LBB53_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28655,6 +28896,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v24, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v26
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v58
@@ -28962,7 +29204,9 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v53, v46
; SI-NEXT: v_mov_b32_e32 v27, v52
; SI-NEXT: v_mov_b32_e32 v52, v45
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v40f16_to_v10f64_scalar:
; VI: ; %bb.0:
@@ -28982,13 +29226,14 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v5
; VI-NEXT: v_mov_b32_e32 v33, v4
; VI-NEXT: v_mov_b32_e32 v34, v3
; VI-NEXT: v_mov_b32_e32 v35, v2
; VI-NEXT: v_mov_b32_e32 v36, v1
; VI-NEXT: v_mov_b32_e32 v37, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB55_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -29142,17 +29387,13 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB55_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB55_2
+; VI-NEXT: s_branch .LBB55_3
;
; GFX9-LABEL: bitcast_v40f16_to_v10f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v5
-; GFX9-NEXT: v_mov_b32_e32 v33, v4
-; GFX9-NEXT: v_mov_b32_e32 v34, v3
-; GFX9-NEXT: v_mov_b32_e32 v35, v2
-; GFX9-NEXT: v_mov_b32_e32 v36, v1
-; GFX9-NEXT: v_mov_b32_e32 v37, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -29168,13 +29409,19 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: v_mov_b32_e32 v32, v5
+; GFX9-NEXT: v_mov_b32_e32 v33, v4
+; GFX9-NEXT: v_mov_b32_e32 v34, v3
+; GFX9-NEXT: v_mov_b32_e32 v35, v2
+; GFX9-NEXT: v_mov_b32_e32 v36, v1
+; GFX9-NEXT: v_mov_b32_e32 v37, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -29189,19 +29436,20 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
-; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v36
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -29226,8 +29474,8 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v32
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v17
@@ -29257,7 +29505,9 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB55_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB55_2
+; GFX9-NEXT: s_branch .LBB55_3
;
; GFX11-TRUE16-LABEL: bitcast_v40f16_to_v10f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -29269,11 +29519,11 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v33.h, v32.h
; GFX11-TRUE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -29283,15 +29533,14 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -29301,12 +29550,13 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -29320,8 +29570,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -29349,7 +29598,9 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB55_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-TRUE16-NEXT: s_branch .LBB55_3
;
; GFX11-FAKE16-LABEL: bitcast_v40f16_to_v10f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -29359,11 +29610,11 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v33, 16, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v35, 0xffff, v0
; GFX11-FAKE16-NEXT: v_and_b32_e32 v34, 0xffff, v1
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s24, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s22, 16
@@ -29373,15 +29624,14 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s2, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s46
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
@@ -29391,12 +29641,13 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s24, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s25, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s26, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
@@ -29410,8 +29661,7 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s15 :: v_dual_mov_b32 v13, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s17 :: v_dual_mov_b32 v15, s0
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s1 :: v_dual_mov_b32 v17, s2
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-FAKE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v33, 16, v35
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v32, 16, v34
@@ -29439,7 +29689,9 @@ define inreg <10 x double> @bitcast_v40f16_to_v10f64_scalar(<40 x half> inreg %a
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB55_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB55_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-FAKE16-NEXT: s_branch .LBB55_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30377,6 +30629,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v27
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -30697,8 +30950,6 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; SI-NEXT: .LBB57_4:
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; kill: killed $vgpr30
-; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: ; kill: killed $vgpr30
; SI-NEXT: ; implicit-def: $vgpr62
; SI-NEXT: ; implicit-def: $vgpr28
; SI-NEXT: ; implicit-def: $vgpr63
@@ -30738,7 +30989,11 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; kill: killed $vgpr30
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: ; kill: killed $vgpr30
+; SI-NEXT: ; implicit-def: $vgpr30
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v40i16_to_v40f16_scalar:
; VI: ; %bb.0:
@@ -30758,17 +31013,21 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v7, 16, v5
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v4
; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_lshrrev_b32_e32 v6, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v0
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_add_i32 s43, s43, 3
; VI-NEXT: s_add_i32 s17, s17, 3
@@ -30798,7 +31057,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s6, s6, 3
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_add_u32_e32 v6, vcc, 3, v6
+; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v11, vcc, 3, v11
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -30809,7 +31068,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; VI-NEXT: v_add_u32_e32 v8, vcc, 3, v8
; VI-NEXT: v_add_u32_e32 v5, vcc, 3, v5
; VI-NEXT: v_add_u32_e32 v7, vcc, 3, v7
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -30839,7 +31098,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; VI-NEXT: s_or_b32 s11, s18, s11
; VI-NEXT: s_and_b32 s18, 0xffff, s25
; VI-NEXT: s_lshl_b32 s10, s10, 16
-; VI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; VI-NEXT: v_lshlrev_b32_e32 v6, 16, v12
; VI-NEXT: s_or_b32 s10, s18, s10
; VI-NEXT: s_and_b32 s18, 0xffff, s26
; VI-NEXT: s_lshl_b32 s9, s9, 16
@@ -30879,8 +31138,6 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s7
; VI-NEXT: v_mov_b32_e32 v13, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v40i16_to_v40f16_scalar:
; GFX9: ; %bb.0:
@@ -30900,17 +31157,21 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v4
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s29, s43
; GFX9-NEXT: v_pk_add_u16 v13, s4, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s28, s42
@@ -30978,8 +31239,6 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v4
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: s_branch .LBB57_5
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -31077,20 +31336,23 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
@@ -31106,12 +31368,12 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s10
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s29, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s28, 3 op_sel_hi:[1,0]
@@ -31126,7 +31388,7 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s8, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v21, s0, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, s1, 3 op_sel_hi:[1,0]
@@ -31154,8 +31416,6 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
-; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
@@ -31171,10 +31431,10 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v27, s40
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s13 :: v_dual_mov_b32 v31, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v33, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v33, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s10
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
@@ -31237,19 +31497,22 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
@@ -31266,10 +31529,10 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s29, 3 op_sel_hi:[1,0]
@@ -31286,12 +31549,12 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s11, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s8, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v21, s0, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s6, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v20
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
@@ -31313,8 +31576,6 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
; GFX11-FAKE16-NEXT: s_branch .LBB57_5
-; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
@@ -31331,8 +31592,8 @@ define inreg <40 x half> @bitcast_v40i16_to_v40f16_scalar(<40 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s13 :: v_dual_mov_b32 v31, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v33, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s9 :: v_dual_mov_b32 v35, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s9 :: v_dual_mov_b32 v37, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s5
; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
@@ -32227,10 +32488,14 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v24, s29
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v27
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v27, v49
; SI-NEXT: v_cvt_f32_f16_e32 v26, v26
; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
@@ -32401,7 +32666,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v37, v9, v51, 16
; SI-NEXT: v_alignbit_b32 v36, v3, v23, 16
; SI-NEXT: v_alignbit_b32 v23, v5, v52, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v49
; SI-NEXT: v_and_b32_e32 v19, 0xffff, v19
@@ -32519,8 +32784,6 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v40f16_to_v40i16_scalar:
; VI: ; %bb.0:
@@ -32540,17 +32803,21 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; VI-NEXT: v_lshrrev_b32_e32 v18, 16, v4
; VI-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v26, 0x200
; VI-NEXT: v_add_f16_e32 v24, s16, v26
; VI-NEXT: v_add_f16_e32 v39, s43, v26
@@ -32593,8 +32860,6 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v5, 0x200, v5
; VI-NEXT: v_add_f16_e32 v19, 0x200, v19
; VI-NEXT: s_branch .LBB59_5
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v26, s6
; VI-NEXT: v_mov_b32_e32 v13, s29
@@ -32691,17 +32956,21 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v6
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v4
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX9-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX9-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -32771,8 +33040,6 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v4
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: s_branch .LBB59_5
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -32870,20 +33137,23 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s21, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
@@ -32899,12 +33169,12 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s21, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s18, s9
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s10
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s29 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s28 op_sel_hi:[0,1]
@@ -32919,7 +33189,7 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s8 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v21, 0x200, s0 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, s1 op_sel_hi:[0,1]
@@ -32947,8 +33217,6 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
-; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
@@ -32964,10 +33232,10 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v26, s41 :: v_dual_mov_b32 v27, s40
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s13 :: v_dual_mov_b32 v31, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v33, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v33, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s7
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s10
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v2
@@ -33030,19 +33298,22 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
@@ -33059,10 +33330,10 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s29 op_sel_hi:[0,1]
@@ -33079,12 +33350,12 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s11 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s8 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v21, 0x200, s0 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s6 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v38, 16, v20
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v37, 16, v4
@@ -33106,8 +33377,6 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v18, 16, v0
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v19, 16, v1
; GFX11-FAKE16-NEXT: s_branch .LBB59_5
-; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v10, s26
@@ -33124,8 +33393,8 @@ define inreg <40 x i16> @bitcast_v40f16_to_v40i16_scalar(<40 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v30, s13 :: v_dual_mov_b32 v31, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s11 :: v_dual_mov_b32 v33, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s9 :: v_dual_mov_b32 v35, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s6 :: v_dual_mov_b32 v37, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s8 :: v_dual_mov_b32 v35, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s9 :: v_dual_mov_b32 v37, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s4 :: v_dual_mov_b32 v39, s5
; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
index 6fe6665..6d6d18d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.64bit.ll
@@ -86,71 +86,78 @@ define inreg double @bitcast_i64_to_f64_scalar(i64 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_i64_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_i64_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_i64_to_f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -244,14 +251,16 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -261,14 +270,16 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -278,14 +289,16 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -295,16 +308,17 @@ define inreg i64 @bitcast_f64_to_i64_scalar(double inreg %a, i32 inreg %b) {
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -405,71 +419,78 @@ define inreg <2 x i32> @bitcast_i64_to_v2i32_scalar(i64 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_i64_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_i64_to_v2i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_i64_to_v2i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
-; GFX11-NEXT: .LBB5_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB5_3: ; %end
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_4:
-; GFX11-NEXT: s_branch .LBB5_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -566,71 +587,78 @@ define inreg i64 @bitcast_v2i32_to_i64_scalar(<2 x i32> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v2i32_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v2i32_to_i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v2i32_to_i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
-; GFX11-NEXT: .LBB7_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB7_3: ; %end
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_4:
-; GFX11-NEXT: s_branch .LBB7_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -728,71 +756,78 @@ define inreg <2 x float> @bitcast_i64_to_v2f32_scalar(i64 inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_u32 s16, s16, 3
; SI-NEXT: s_addc_u32 s17, s17, 0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_i64_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_i64_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_i64_to_v2f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -888,15 +923,17 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB11_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_4
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_3:
-; SI-NEXT: s_branch .LBB11_2
; SI-NEXT: .LBB11_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -906,15 +943,17 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -924,15 +963,17 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -942,17 +983,18 @@ define inreg i64 @bitcast_v2f32_to_i64_scalar(<2 x float> inreg %a, i32 inreg %b
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1063,6 +1105,7 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -1083,60 +1126,67 @@ define inreg <4 x i16> @bitcast_i64_to_v4i16_scalar(i64 inreg %a, i32 inreg %b)
; SI-NEXT: .LBB13_4:
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_i64_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_i64_to_v4i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_i64_to_v4i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1264,6 +1314,7 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -1290,16 +1341,22 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v4i16_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_3
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s17, 3
; VI-NEXT: s_and_b32 s4, s17, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -1310,26 +1367,26 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB15_3: ; %end
+; VI-NEXT: .LBB15_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_4:
-; VI-NEXT: s_branch .LBB15_2
;
; GFX9-LABEL: bitcast_v4i16_to_i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1339,17 +1396,18 @@ define inreg i64 @bitcast_v4i16_to_i64_scalar(<4 x i16> inreg %a, i32 inreg %b)
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1478,6 +1536,7 @@ define inreg <4 x half> @bitcast_i64_to_v4f16_scalar(i64 inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s17, 16
@@ -1503,60 +1562,67 @@ define inreg <4 x half> @bitcast_i64_to_v4f16_scalar(i64 inreg %a, i32 inreg %b)
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_i64_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_i64_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_i64_to_v4f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1698,6 +1764,7 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; SI-NEXT: v_cvt_f16_f32_e32 v3, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5
@@ -1726,16 +1793,22 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v4f16_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_4
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -1748,8 +1821,6 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_3:
-; VI-NEXT: s_branch .LBB19_2
; VI-NEXT: .LBB19_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -1759,16 +1830,18 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -1778,17 +1851,18 @@ define inreg i64 @bitcast_v4f16_to_i64_scalar(<4 x half> inreg %a, i32 inreg %b)
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -1913,6 +1987,7 @@ define inreg <4 x bfloat> @bitcast_i64_to_v4bf16_scalar(i64 inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s17, 0xffff0000
@@ -1938,60 +2013,67 @@ define inreg <4 x bfloat> @bitcast_i64_to_v4bf16_scalar(i64 inreg %a, i32 inreg
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
+; SI-NEXT: s_branch .LBB21_3
;
; VI-LABEL: bitcast_i64_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_i64_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_i64_to_v4bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_3
-; GFX11-NEXT: .LBB21_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: .LBB21_3: ; %end
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_4:
-; GFX11-NEXT: s_branch .LBB21_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2277,6 +2359,7 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB23_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v4
@@ -2301,16 +2384,22 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB23_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB23_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB23_2
+; SI-NEXT: s_branch .LBB23_3
;
; VI-LABEL: bitcast_v4bf16_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB23_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_4
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -2349,8 +2438,6 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v2, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_3:
-; VI-NEXT: s_branch .LBB23_2
; VI-NEXT: .LBB23_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2360,10 +2447,14 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_4
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v1
@@ -2405,8 +2496,6 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX9-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_3:
-; GFX9-NEXT: s_branch .LBB23_2
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2416,12 +2505,15 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
-; GFX11-NEXT: .LBB23_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -2467,8 +2559,6 @@ define inreg i64 @bitcast_v4bf16_to_i64_scalar(<4 x bfloat> inreg %a, i32 inreg
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: s_branch .LBB23_2
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -2706,6 +2796,7 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -2740,12 +2831,15 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
+; SI-NEXT: s_branch .LBB25_3
;
; VI-LABEL: bitcast_i64_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB25_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -2781,12 +2875,15 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; VI-NEXT: ; implicit-def: $sgpr9
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: s_branch .LBB25_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB25_2
+; VI-NEXT: s_branch .LBB25_3
;
; GFX9-LABEL: bitcast_i64_to_v8i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -2822,13 +2919,15 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; GFX9-NEXT: ; implicit-def: $sgpr9
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB25_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB25_2
+; GFX9-NEXT: s_branch .LBB25_3
;
; GFX11-LABEL: bitcast_i64_to_v8i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB25_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
@@ -2837,8 +2936,7 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_lshr_b32 s5, s1, 8
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s7, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB25_3
+; GFX11-NEXT: s_cbranch_execnz .LBB25_3
; GFX11-NEXT: .LBB25_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
@@ -2862,7 +2960,9 @@ define inreg <8 x i8> @bitcast_i64_to_v8i8_scalar(i64 inreg %a, i32 inreg %b) {
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB25_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB25_2
+; GFX11-NEXT: s_branch .LBB25_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3249,6 +3349,7 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB27_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -3303,12 +3404,15 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB27_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB27_2
+; SI-NEXT: s_branch .LBB27_3
;
; VI-LABEL: bitcast_v8i8_to_i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB27_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -3363,12 +3467,15 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB27_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB27_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB27_2
+; VI-NEXT: s_branch .LBB27_3
;
; GFX9-LABEL: bitcast_v8i8_to_i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -3423,35 +3530,36 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB27_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB27_2
+; GFX9-NEXT: s_branch .LBB27_3
;
; GFX11-LABEL: bitcast_v8i8_to_i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB27_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB27_3
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB27_3
; GFX11-NEXT: .LBB27_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
@@ -3485,7 +3593,9 @@ define inreg i64 @bitcast_v8i8_to_i64_scalar(<8 x i8> inreg %a, i32 inreg %b) {
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB27_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_cbranch_vccz .LBB27_2
+; GFX11-NEXT: s_branch .LBB27_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3579,14 +3689,16 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB29_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB29_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB29_4
-; SI-NEXT: .LBB29_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB29_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB29_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB29_3:
-; SI-NEXT: s_branch .LBB29_2
; SI-NEXT: .LBB29_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -3596,14 +3708,16 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3613,14 +3727,16 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3630,16 +3746,17 @@ define inreg <2 x i32> @bitcast_f64_to_v2i32_scalar(double inreg %a, i32 inreg %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -3739,71 +3856,78 @@ define inreg double @bitcast_v2i32_to_f64_scalar(<2 x i32> inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB31_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB31_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB31_3
-; SI-NEXT: .LBB31_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB31_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB31_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB31_3: ; %end
+; SI-NEXT: .LBB31_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB31_4:
-; SI-NEXT: s_branch .LBB31_2
;
; VI-LABEL: bitcast_v2i32_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_3
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB31_3: ; %end
+; VI-NEXT: .LBB31_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_4:
-; VI-NEXT: s_branch .LBB31_2
;
; GFX9-LABEL: bitcast_v2i32_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_3
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB31_3: ; %end
+; GFX9-NEXT: .LBB31_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_4:
-; GFX9-NEXT: s_branch .LBB31_2
;
; GFX11-LABEL: bitcast_v2i32_to_f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB31_3
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB31_3: ; %end
+; GFX11-NEXT: .LBB31_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_4:
-; GFX11-NEXT: s_branch .LBB31_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3897,14 +4021,16 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB33_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB33_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB33_4
-; SI-NEXT: .LBB33_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB33_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB33_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB33_3:
-; SI-NEXT: s_branch .LBB33_2
; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -3914,14 +4040,16 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -3931,14 +4059,16 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3948,16 +4078,17 @@ define inreg <2 x float> @bitcast_f64_to_v2f32_scalar(double inreg %a, i32 inreg
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4056,15 +4187,17 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB35_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB35_4
-; SI-NEXT: .LBB35_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB35_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB35_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB35_3:
-; SI-NEXT: s_branch .LBB35_2
; SI-NEXT: .LBB35_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -4074,15 +4207,17 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_4
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_3:
-; VI-NEXT: s_branch .LBB35_2
; VI-NEXT: .LBB35_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4092,15 +4227,17 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4110,17 +4247,18 @@ define inreg double @bitcast_v2f32_to_f64_scalar(<2 x float> inreg %a, i32 inreg
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4229,6 +4367,7 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB37_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -4243,7 +4382,8 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; SI-NEXT: .LBB37_3:
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB37_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB37_2
; SI-NEXT: .LBB37_4:
; SI-NEXT: v_mov_b32_e32 v5, s17
; SI-NEXT: v_mov_b32_e32 v4, s16
@@ -4257,14 +4397,16 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB37_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_4
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_3:
-; VI-NEXT: s_branch .LBB37_2
; VI-NEXT: .LBB37_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4274,14 +4416,16 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_4
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_3:
-; GFX9-NEXT: s_branch .LBB37_2
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4291,16 +4435,17 @@ define inreg <4 x i16> @bitcast_f64_to_v4i16_scalar(double inreg %a, i32 inreg %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
-; GFX11-NEXT: .LBB37_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: s_branch .LBB37_2
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4431,6 +4576,7 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB39_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -4457,16 +4603,22 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB39_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB39_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB39_2
+; SI-NEXT: s_branch .LBB39_3
;
; VI-LABEL: bitcast_v4i16_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s17, 3
; VI-NEXT: s_and_b32 s4, s17, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -4477,26 +4629,26 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v4i16_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_4
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_3:
-; GFX9-NEXT: s_branch .LBB39_2
; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4506,17 +4658,18 @@ define inreg double @bitcast_v4i16_to_f64_scalar(<4 x i16> inreg %a, i32 inreg %
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
-; GFX11-NEXT: .LBB39_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: s_branch .LBB39_2
; GFX11-NEXT: .LBB39_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4636,6 +4789,7 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s17, 16
@@ -4660,20 +4814,24 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_f64_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB41_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB41_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB41_4
-; VI-NEXT: .LBB41_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB41_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB41_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB41_3:
-; VI-NEXT: s_branch .LBB41_2
; VI-NEXT: .LBB41_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4683,14 +4841,16 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB41_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB41_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB41_4
-; GFX9-NEXT: .LBB41_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB41_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB41_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB41_3:
-; GFX9-NEXT: s_branch .LBB41_2
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4700,16 +4860,17 @@ define inreg <4 x half> @bitcast_f64_to_v4f16_scalar(double inreg %a, i32 inreg
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB41_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB41_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB41_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB41_4
-; GFX11-NEXT: .LBB41_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB41_3:
-; GFX11-NEXT: s_branch .LBB41_2
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -4854,6 +5015,7 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; SI-NEXT: v_cvt_f16_f32_e32 v3, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5
@@ -4882,16 +5044,22 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v4f16_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB43_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB43_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB43_4
-; VI-NEXT: .LBB43_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB43_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB43_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -4904,8 +5072,6 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB43_3:
-; VI-NEXT: s_branch .LBB43_2
; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4915,16 +5081,18 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB43_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB43_4
-; GFX9-NEXT: .LBB43_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB43_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB43_3:
-; GFX9-NEXT: s_branch .LBB43_2
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -4934,17 +5102,18 @@ define inreg double @bitcast_v4f16_to_f64_scalar(<4 x half> inreg %a, i32 inreg
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB43_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB43_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
-; GFX11-NEXT: .LBB43_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: s_branch .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5058,6 +5227,7 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s9, s17, 0xffff0000
@@ -5077,7 +5247,8 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
; SI-NEXT: .LBB45_4:
; SI-NEXT: v_mov_b32_e32 v3, s9
; SI-NEXT: v_mov_b32_e32 v2, s8
@@ -5089,14 +5260,16 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB45_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB45_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB45_4
-; VI-NEXT: .LBB45_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB45_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB45_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB45_3:
-; VI-NEXT: s_branch .LBB45_2
; VI-NEXT: .LBB45_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5106,14 +5279,16 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB45_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB45_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB45_4
-; GFX9-NEXT: .LBB45_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB45_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB45_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], s[16:17], 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB45_3:
-; GFX9-NEXT: s_branch .LBB45_2
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5123,16 +5298,17 @@ define inreg <4 x bfloat> @bitcast_f64_to_v4bf16_scalar(double inreg %a, i32 inr
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB45_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB45_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB45_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB45_4
-; GFX11-NEXT: .LBB45_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], s[0:1], 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB45_3:
-; GFX11-NEXT: s_branch .LBB45_2
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5421,6 +5597,7 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v4
@@ -5445,16 +5622,22 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v4bf16_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB47_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB47_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB47_4
-; VI-NEXT: .LBB47_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB47_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB47_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -5493,8 +5676,6 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v2, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB47_3:
-; VI-NEXT: s_branch .LBB47_2
; VI-NEXT: .LBB47_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5504,10 +5685,14 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB47_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB47_4
-; GFX9-NEXT: .LBB47_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB47_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v1
@@ -5549,8 +5734,6 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX9-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB47_3:
-; GFX9-NEXT: s_branch .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5560,12 +5743,15 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB47_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB47_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
-; GFX11-NEXT: .LBB47_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -5611,8 +5797,6 @@ define inreg double @bitcast_v4bf16_to_f64_scalar(<4 x bfloat> inreg %a, i32 inr
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: s_branch .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -5843,6 +6027,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -5869,7 +6054,8 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
; SI-NEXT: .LBB49_4:
; SI-NEXT: v_mov_b32_e32 v9, s17
; SI-NEXT: v_mov_b32_e32 v8, s16
@@ -5885,6 +6071,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -5910,7 +6097,8 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr9
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v8, s16
; VI-NEXT: v_mov_b32_e32 v9, s17
@@ -5929,6 +6117,7 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -5954,7 +6143,8 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX9-NEXT: ; implicit-def: $sgpr5
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
@@ -5973,17 +6163,16 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
-; GFX11-NEXT: s_lshr_b32 s6, s1, 24
-; GFX11-NEXT: s_lshr_b32 s5, s1, 16
+; GFX11-NEXT: s_lshr_b32 s5, s1, 24
+; GFX11-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-NEXT: s_lshr_b32 s3, s1, 8
-; GFX11-NEXT: s_lshr_b32 s8, s0, 16
-; GFX11-NEXT: s_lshr_b32 s7, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX11-NEXT: s_lshr_b32 s7, s0, 16
+; GFX11-NEXT: s_lshr_b32 s6, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB49_4
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], s[0:1], 1.0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
@@ -5995,18 +6184,19 @@ define inreg <8 x i8> @bitcast_f64_to_v8i8_scalar(double inreg %a, i32 inreg %b)
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
; GFX11-NEXT: s_branch .LBB49_5
; GFX11-NEXT: .LBB49_3:
+; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr3
+; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr5
-; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v2, s8 :: v_dual_mov_b32 v1, s7
-; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v6, s5
-; GFX11-NEXT: v_mov_b32_e32 v7, s6
+; GFX11-NEXT: v_dual_mov_b32 v2, s7 :: v_dual_mov_b32 v1, s6
+; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v6, s4
+; GFX11-NEXT: v_mov_b32_e32 v7, s5
; GFX11-NEXT: v_mov_b32_e32 v5, s3
; GFX11-NEXT: .LBB49_5: ; %end
; GFX11-NEXT: v_mov_b32_e32 v0, v8
@@ -6398,6 +6588,7 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB51_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -6452,12 +6643,15 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v8i8_to_f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -6512,12 +6706,15 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v8i8_to_f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -6572,35 +6769,36 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-LABEL: bitcast_v8i8_to_f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-NEXT: .LBB51_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
@@ -6634,7 +6832,9 @@ define inreg double @bitcast_v8i8_to_f64_scalar(<8 x i8> inreg %a, i32 inreg %b)
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB51_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6731,71 +6931,78 @@ define inreg <2 x float> @bitcast_v2i32_to_v2f32_scalar(<2 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB53_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB53_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB53_3
-; SI-NEXT: .LBB53_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB53_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB53_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB53_3: ; %end
+; SI-NEXT: .LBB53_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB53_4:
-; SI-NEXT: s_branch .LBB53_2
;
; VI-LABEL: bitcast_v2i32_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_3
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB53_3: ; %end
+; VI-NEXT: .LBB53_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_4:
-; VI-NEXT: s_branch .LBB53_2
;
; GFX9-LABEL: bitcast_v2i32_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_3
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB53_3: ; %end
+; GFX9-NEXT: .LBB53_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_4:
-; GFX9-NEXT: s_branch .LBB53_2
;
; GFX11-LABEL: bitcast_v2i32_to_v2f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB53_3: ; %end
+; GFX11-NEXT: .LBB53_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_4:
-; GFX11-NEXT: s_branch .LBB53_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6891,15 +7098,17 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
-; SI-NEXT: s_cbranch_scc0 .LBB55_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB55_4
-; SI-NEXT: .LBB55_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB55_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB55_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB55_3:
-; SI-NEXT: s_branch .LBB55_2
; SI-NEXT: .LBB55_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -6909,15 +7118,17 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_4
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_3:
-; VI-NEXT: s_branch .LBB55_2
; VI-NEXT: .LBB55_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6927,15 +7138,17 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6945,17 +7158,18 @@ define inreg <2 x i32> @bitcast_v2f32_to_v2i32_scalar(<2 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7065,6 +7279,7 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -7085,60 +7300,67 @@ define inreg <4 x i16> @bitcast_v2i32_to_v4i16_scalar(<2 x i32> inreg %a, i32 in
; SI-NEXT: .LBB57_4:
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v2i32_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v2i32_to_v4i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_3
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB57_3: ; %end
+; GFX9-NEXT: .LBB57_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_4:
-; GFX9-NEXT: s_branch .LBB57_2
;
; GFX11-LABEL: bitcast_v2i32_to_v4i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB57_3
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB57_3: ; %end
+; GFX11-NEXT: .LBB57_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_4:
-; GFX11-NEXT: s_branch .LBB57_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7266,6 +7488,7 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB59_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -7292,16 +7515,22 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB59_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB59_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB59_2
+; SI-NEXT: s_branch .LBB59_3
;
; VI-LABEL: bitcast_v4i16_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_3
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s17, 3
; VI-NEXT: s_and_b32 s4, s17, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -7312,26 +7541,26 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB59_3: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v4i16_to_v2i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7341,17 +7570,18 @@ define inreg <2 x i32> @bitcast_v4i16_to_v2i32_scalar(<4 x i16> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7479,6 +7709,7 @@ define inreg <4 x half> @bitcast_v2i32_to_v4f16_scalar(<2 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB61_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s17, 16
@@ -7504,60 +7735,67 @@ define inreg <4 x half> @bitcast_v2i32_to_v4f16_scalar(<2 x i32> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB61_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB61_2
+; SI-NEXT: s_branch .LBB61_3
;
; VI-LABEL: bitcast_v2i32_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB61_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB61_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB61_3
-; VI-NEXT: .LBB61_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB61_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB61_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB61_3: ; %end
+; VI-NEXT: .LBB61_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB61_4:
-; VI-NEXT: s_branch .LBB61_2
;
; GFX9-LABEL: bitcast_v2i32_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB61_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB61_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB61_3
-; GFX9-NEXT: .LBB61_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB61_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB61_3: ; %end
+; GFX9-NEXT: .LBB61_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB61_4:
-; GFX9-NEXT: s_branch .LBB61_2
;
; GFX11-LABEL: bitcast_v2i32_to_v4f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB61_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB61_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB61_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB61_3
-; GFX11-NEXT: .LBB61_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB61_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB61_3: ; %end
+; GFX11-NEXT: .LBB61_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB61_4:
-; GFX11-NEXT: s_branch .LBB61_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7699,6 +7937,7 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v3, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB63_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5
@@ -7727,16 +7966,22 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB63_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB63_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB63_2
+; SI-NEXT: s_branch .LBB63_3
;
; VI-LABEL: bitcast_v4f16_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB63_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB63_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB63_4
-; VI-NEXT: .LBB63_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB63_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB63_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -7749,8 +7994,6 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB63_3:
-; VI-NEXT: s_branch .LBB63_2
; VI-NEXT: .LBB63_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -7760,16 +8003,18 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB63_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB63_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB63_4
-; GFX9-NEXT: .LBB63_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB63_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB63_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB63_3:
-; GFX9-NEXT: s_branch .LBB63_2
; GFX9-NEXT: .LBB63_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -7779,17 +8024,18 @@ define inreg <2 x i32> @bitcast_v4f16_to_v2i32_scalar(<4 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB63_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB63_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB63_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB63_4
-; GFX11-NEXT: .LBB63_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB63_3:
-; GFX11-NEXT: s_branch .LBB63_2
; GFX11-NEXT: .LBB63_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -7913,6 +8159,7 @@ define inreg <4 x bfloat> @bitcast_v2i32_to_v4bf16_scalar(<2 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB65_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s17, 0xffff0000
@@ -7938,60 +8185,67 @@ define inreg <4 x bfloat> @bitcast_v2i32_to_v4bf16_scalar(<2 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB65_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB65_2
+; SI-NEXT: s_branch .LBB65_3
;
; VI-LABEL: bitcast_v2i32_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB65_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB65_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB65_3
-; VI-NEXT: .LBB65_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB65_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB65_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB65_3: ; %end
+; VI-NEXT: .LBB65_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB65_4:
-; VI-NEXT: s_branch .LBB65_2
;
; GFX9-LABEL: bitcast_v2i32_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB65_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB65_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB65_3
-; GFX9-NEXT: .LBB65_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB65_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB65_3: ; %end
+; GFX9-NEXT: .LBB65_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB65_4:
-; GFX9-NEXT: s_branch .LBB65_2
;
; GFX11-LABEL: bitcast_v2i32_to_v4bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB65_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB65_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB65_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
-; GFX11-NEXT: s_cbranch_vccnz .LBB65_3
-; GFX11-NEXT: .LBB65_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB65_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB65_3: ; %end
+; GFX11-NEXT: .LBB65_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB65_4:
-; GFX11-NEXT: s_branch .LBB65_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8277,6 +8531,7 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB67_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v4
@@ -8301,16 +8556,22 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB67_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB67_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB67_2
+; SI-NEXT: s_branch .LBB67_3
;
; VI-LABEL: bitcast_v4bf16_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB67_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB67_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB67_4
-; VI-NEXT: .LBB67_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB67_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB67_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -8349,8 +8610,6 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v2, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB67_3:
-; VI-NEXT: s_branch .LBB67_2
; VI-NEXT: .LBB67_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -8360,10 +8619,14 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB67_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB67_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB67_4
-; GFX9-NEXT: .LBB67_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB67_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB67_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v1
@@ -8405,8 +8668,6 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB67_3:
-; GFX9-NEXT: s_branch .LBB67_2
; GFX9-NEXT: .LBB67_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -8416,12 +8677,15 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB67_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB67_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB67_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB67_4
-; GFX11-NEXT: .LBB67_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -8467,8 +8731,6 @@ define inreg <2 x i32> @bitcast_v4bf16_to_v2i32_scalar(<4 x bfloat> inreg %a, i3
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB67_3:
-; GFX11-NEXT: s_branch .LBB67_2
; GFX11-NEXT: .LBB67_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -8704,6 +8966,7 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB69_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -8738,12 +9001,15 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB69_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB69_2
+; SI-NEXT: s_branch .LBB69_3
;
; VI-LABEL: bitcast_v2i32_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB69_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -8779,12 +9045,15 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; VI-NEXT: ; implicit-def: $sgpr9
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: s_branch .LBB69_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB69_2
+; VI-NEXT: s_branch .LBB69_3
;
; GFX9-LABEL: bitcast_v2i32_to_v8i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB69_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -8820,13 +9089,15 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; GFX9-NEXT: ; implicit-def: $sgpr9
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB69_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB69_2
+; GFX9-NEXT: s_branch .LBB69_3
;
; GFX11-LABEL: bitcast_v2i32_to_v8i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB69_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
@@ -8835,8 +9106,7 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; GFX11-NEXT: s_lshr_b32 s5, s1, 8
; GFX11-NEXT: s_lshr_b32 s6, s0, 16
; GFX11-NEXT: s_lshr_b32 s7, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB69_3
+; GFX11-NEXT: s_cbranch_execnz .LBB69_3
; GFX11-NEXT: .LBB69_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
@@ -8860,7 +9130,9 @@ define inreg <8 x i8> @bitcast_v2i32_to_v8i8_scalar(<2 x i32> inreg %a, i32 inre
; GFX11-NEXT: ; implicit-def: $sgpr5
; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB69_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB69_2
+; GFX11-NEXT: s_branch .LBB69_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9247,6 +9519,7 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB71_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -9301,12 +9574,15 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB71_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB71_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB71_2
+; SI-NEXT: s_branch .LBB71_3
;
; VI-LABEL: bitcast_v8i8_to_v2i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB71_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -9361,12 +9637,15 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB71_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB71_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB71_2
+; VI-NEXT: s_branch .LBB71_3
;
; GFX9-LABEL: bitcast_v8i8_to_v2i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB71_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -9421,35 +9700,36 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB71_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB71_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB71_2
+; GFX9-NEXT: s_branch .LBB71_3
;
; GFX11-LABEL: bitcast_v8i8_to_v2i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB71_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB71_3
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB71_3
; GFX11-NEXT: .LBB71_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
@@ -9483,7 +9763,9 @@ define inreg <2 x i32> @bitcast_v8i8_to_v2i32_scalar(<8 x i8> inreg %a, i32 inre
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB71_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB71_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_cbranch_vccz .LBB71_2
+; GFX11-NEXT: s_branch .LBB71_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9589,6 +9871,7 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB73_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -9604,7 +9887,8 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; SI-NEXT: .LBB73_3:
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB73_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB73_2
; SI-NEXT: .LBB73_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -9615,15 +9899,17 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB73_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB73_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB73_4
-; VI-NEXT: .LBB73_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB73_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB73_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB73_3:
-; VI-NEXT: s_branch .LBB73_2
; VI-NEXT: .LBB73_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -9633,15 +9919,17 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB73_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB73_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB73_4
-; GFX9-NEXT: .LBB73_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB73_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB73_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB73_3:
-; GFX9-NEXT: s_branch .LBB73_2
; GFX9-NEXT: .LBB73_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9651,17 +9939,18 @@ define inreg <4 x i16> @bitcast_v2f32_to_v4i16_scalar(<2 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB73_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB73_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB73_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB73_4
-; GFX11-NEXT: .LBB73_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB73_3:
-; GFX11-NEXT: s_branch .LBB73_2
; GFX11-NEXT: .LBB73_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -9792,6 +10081,7 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB75_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -9818,16 +10108,22 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB75_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB75_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB75_2
+; SI-NEXT: s_branch .LBB75_3
;
; VI-LABEL: bitcast_v4i16_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB75_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB75_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB75_3
-; VI-NEXT: .LBB75_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB75_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB75_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s17, 3
; VI-NEXT: s_and_b32 s4, s17, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -9838,26 +10134,26 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB75_3: ; %end
+; VI-NEXT: .LBB75_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB75_4:
-; VI-NEXT: s_branch .LBB75_2
;
; GFX9-LABEL: bitcast_v4i16_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB75_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB75_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB75_4
-; GFX9-NEXT: .LBB75_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB75_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB75_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB75_3:
-; GFX9-NEXT: s_branch .LBB75_2
; GFX9-NEXT: .LBB75_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -9867,17 +10163,18 @@ define inreg <2 x float> @bitcast_v4i16_to_v2f32_scalar(<4 x i16> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB75_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB75_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB75_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB75_4
-; GFX11-NEXT: .LBB75_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB75_3:
-; GFX11-NEXT: s_branch .LBB75_2
; GFX11-NEXT: .LBB75_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -10004,6 +10301,7 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB77_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s17, 16
@@ -10029,21 +10327,25 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB77_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB77_2
+; SI-NEXT: s_branch .LBB77_3
;
; VI-LABEL: bitcast_v2f32_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB77_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB77_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB77_4
-; VI-NEXT: .LBB77_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB77_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB77_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB77_3:
-; VI-NEXT: s_branch .LBB77_2
; VI-NEXT: .LBB77_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10053,15 +10355,17 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB77_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB77_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB77_4
-; GFX9-NEXT: .LBB77_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB77_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB77_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB77_3:
-; GFX9-NEXT: s_branch .LBB77_2
; GFX9-NEXT: .LBB77_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10071,17 +10375,18 @@ define inreg <4 x half> @bitcast_v2f32_to_v4f16_scalar(<2 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB77_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB77_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB77_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB77_4
-; GFX11-NEXT: .LBB77_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB77_3:
-; GFX11-NEXT: s_branch .LBB77_2
; GFX11-NEXT: .LBB77_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -10226,6 +10531,7 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v3, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB79_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v5
@@ -10254,16 +10560,22 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB79_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB79_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB79_2
+; SI-NEXT: s_branch .LBB79_3
;
; VI-LABEL: bitcast_v4f16_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB79_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB79_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB79_4
-; VI-NEXT: .LBB79_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB79_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB79_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -10276,8 +10588,6 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v2
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB79_3:
-; VI-NEXT: s_branch .LBB79_2
; VI-NEXT: .LBB79_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10287,16 +10597,18 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB79_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB79_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB79_4
-; GFX9-NEXT: .LBB79_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB79_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB79_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB79_3:
-; GFX9-NEXT: s_branch .LBB79_2
; GFX9-NEXT: .LBB79_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10306,17 +10618,18 @@ define inreg <2 x float> @bitcast_v4f16_to_v2f32_scalar(<4 x half> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB79_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB79_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB79_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB79_4
-; GFX11-NEXT: .LBB79_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB79_3:
-; GFX11-NEXT: s_branch .LBB79_2
; GFX11-NEXT: .LBB79_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -10439,6 +10752,7 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB81_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s17, 0xffff0000
@@ -10459,7 +10773,8 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB81_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB81_2
; SI-NEXT: .LBB81_4:
; SI-NEXT: v_mov_b32_e32 v0, s9
; SI-NEXT: v_mov_b32_e32 v1, s8
@@ -10471,15 +10786,17 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB81_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB81_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB81_4
-; VI-NEXT: .LBB81_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB81_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB81_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB81_3:
-; VI-NEXT: s_branch .LBB81_2
; VI-NEXT: .LBB81_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10489,15 +10806,17 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB81_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB81_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB81_4
-; GFX9-NEXT: .LBB81_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB81_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB81_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB81_3:
-; GFX9-NEXT: s_branch .LBB81_2
; GFX9-NEXT: .LBB81_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10507,17 +10826,18 @@ define inreg <4 x bfloat> @bitcast_v2f32_to_v4bf16_scalar(<2 x float> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB81_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB81_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB81_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB81_4
-; GFX11-NEXT: .LBB81_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB81_3:
-; GFX11-NEXT: s_branch .LBB81_2
; GFX11-NEXT: .LBB81_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -10806,6 +11126,7 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v2, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB83_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v4
@@ -10830,16 +11151,22 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB83_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB83_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB83_2
+; SI-NEXT: s_branch .LBB83_3
;
; VI-LABEL: bitcast_v4bf16_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB83_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB83_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB83_4
-; VI-NEXT: .LBB83_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB83_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB83_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s17, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -10878,8 +11205,6 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v2, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB83_3:
-; VI-NEXT: s_branch .LBB83_2
; VI-NEXT: .LBB83_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -10889,10 +11214,14 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB83_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB83_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB83_4
-; GFX9-NEXT: .LBB83_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB83_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB83_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v0, s4, v1
@@ -10934,8 +11263,6 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX9-NEXT: v_and_b32_sdwa v1, v3, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v1, v2, 16, v1
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB83_3:
-; GFX9-NEXT: s_branch .LBB83_2
; GFX9-NEXT: .LBB83_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -10945,12 +11272,15 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB83_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB83_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB83_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB83_4
-; GFX11-NEXT: .LBB83_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -10996,8 +11326,6 @@ define inreg <2 x float> @bitcast_v4bf16_to_v2f32_scalar(<4 x bfloat> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v3, 16, v3
; GFX11-NEXT: v_lshl_or_b32 v1, v3, 16, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB83_3:
-; GFX11-NEXT: s_branch .LBB83_2
; GFX11-NEXT: .LBB83_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -11231,6 +11559,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s18, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB85_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s16
@@ -11258,7 +11587,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB85_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB85_2
; SI-NEXT: .LBB85_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v4, s17
@@ -11271,6 +11601,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB85_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -11297,7 +11628,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr9
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr5
-; VI-NEXT: s_branch .LBB85_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB85_2
; VI-NEXT: .LBB85_4:
; VI-NEXT: v_mov_b32_e32 v8, s16
; VI-NEXT: v_mov_b32_e32 v9, s17
@@ -11316,6 +11648,7 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB85_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -11342,7 +11675,8 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr9
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB85_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB85_2
; GFX9-NEXT: .LBB85_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
@@ -11361,17 +11695,16 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB85_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
-; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
-; GFX11-NEXT: s_lshr_b32 s8, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB85_4
+; GFX11-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-NEXT: s_lshr_b32 s5, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB85_4
; GFX11-NEXT: .LBB85_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v9, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v8, s0, 1.0
@@ -11384,17 +11717,18 @@ define inreg <8 x i8> @bitcast_v2f32_to_v8i8_scalar(<2 x float> inreg %a, i32 in
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
; GFX11-NEXT: s_branch .LBB85_5
; GFX11-NEXT: .LBB85_3:
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr5
+; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB85_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB85_2
; GFX11-NEXT: .LBB85_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s7 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s5 :: v_dual_mov_b32 v6, s4
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB85_5: ; %end
@@ -11787,6 +12121,7 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_cbranch_scc0 .LBB87_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -11841,12 +12176,15 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB87_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB87_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB87_2
+; SI-NEXT: s_branch .LBB87_3
;
; VI-LABEL: bitcast_v8i8_to_v2f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB87_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -11901,12 +12239,15 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB87_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB87_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB87_2
+; VI-NEXT: s_branch .LBB87_3
;
; GFX9-LABEL: bitcast_v8i8_to_v2f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB87_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -11961,35 +12302,36 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB87_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB87_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB87_2
+; GFX9-NEXT: s_branch .LBB87_3
;
; GFX11-LABEL: bitcast_v8i8_to_v2f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB87_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB87_3
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB87_3
; GFX11-NEXT: .LBB87_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
@@ -12023,7 +12365,9 @@ define inreg <2 x float> @bitcast_v8i8_to_v2f32_scalar(<8 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB87_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB87_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_cbranch_vccz .LBB87_2
+; GFX11-NEXT: s_branch .LBB87_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12155,6 +12499,7 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB89_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -12178,16 +12523,22 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB89_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB89_2
+; SI-NEXT: s_branch .LBB89_3
;
; VI-LABEL: bitcast_v4i16_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB89_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB89_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB89_3
-; VI-NEXT: .LBB89_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB89_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB89_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
@@ -12198,26 +12549,26 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB89_3: ; %end
+; VI-NEXT: .LBB89_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB89_4:
-; VI-NEXT: s_branch .LBB89_2
;
; GFX9-LABEL: bitcast_v4i16_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB89_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB89_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB89_4
-; GFX9-NEXT: .LBB89_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB89_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB89_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB89_3:
-; GFX9-NEXT: s_branch .LBB89_2
; GFX9-NEXT: .LBB89_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12227,17 +12578,18 @@ define inreg <4 x half> @bitcast_v4i16_to_v4f16_scalar(<4 x i16> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB89_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB89_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB89_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB89_4
-; GFX11-NEXT: .LBB89_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB89_3:
-; GFX11-NEXT: s_branch .LBB89_2
; GFX11-NEXT: .LBB89_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -12367,10 +12719,14 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: v_cvt_f16_f32_e32 v3, s19
; SI-NEXT: s_cmp_lg_u32 s20, 0
-; SI-NEXT: s_cbranch_scc0 .LBB91_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB91_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB91_3
-; SI-NEXT: .LBB91_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB91_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB91_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
@@ -12388,19 +12744,21 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v2, v2, v4
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: .LBB91_3: ; %end
+; SI-NEXT: .LBB91_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB91_4:
-; SI-NEXT: s_branch .LBB91_2
;
; VI-LABEL: bitcast_v4f16_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB91_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB91_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB91_4
-; VI-NEXT: .LBB91_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB91_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB91_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -12413,8 +12771,6 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v1, v1, v0
; VI-NEXT: v_or_b32_e32 v0, v2, v3
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB91_3:
-; VI-NEXT: s_branch .LBB91_2
; VI-NEXT: .LBB91_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -12424,16 +12780,18 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB91_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB91_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB91_4
-; GFX9-NEXT: .LBB91_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB91_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB91_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB91_3:
-; GFX9-NEXT: s_branch .LBB91_2
; GFX9-NEXT: .LBB91_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12443,17 +12801,18 @@ define inreg <4 x i16> @bitcast_v4f16_to_v4i16_scalar(<4 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB91_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB91_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB91_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB91_4
-; GFX11-NEXT: .LBB91_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB91_3:
-; GFX11-NEXT: s_branch .LBB91_2
; GFX11-NEXT: .LBB91_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -12586,6 +12945,7 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB93_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s6, s16, 16
@@ -12619,16 +12979,22 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB93_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB93_2
+; SI-NEXT: s_branch .LBB93_3
;
; VI-LABEL: bitcast_v4i16_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB93_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB93_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB93_3
-; VI-NEXT: .LBB93_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB93_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB93_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_and_b32 s4, s16, 0xffff0000
@@ -12639,26 +13005,26 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB93_3: ; %end
+; VI-NEXT: .LBB93_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB93_4:
-; VI-NEXT: s_branch .LBB93_2
;
; GFX9-LABEL: bitcast_v4i16_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB93_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB93_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB93_4
-; GFX9-NEXT: .LBB93_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB93_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB93_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB93_3:
-; GFX9-NEXT: s_branch .LBB93_2
; GFX9-NEXT: .LBB93_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12668,17 +13034,18 @@ define inreg <4 x bfloat> @bitcast_v4i16_to_v4bf16_scalar(<4 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB93_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB93_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB93_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB93_4
-; GFX11-NEXT: .LBB93_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB93_3:
-; GFX11-NEXT: s_branch .LBB93_2
; GFX11-NEXT: .LBB93_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -12975,6 +13342,7 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v6, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s19
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB95_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v7
@@ -13004,16 +13372,22 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB95_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB95_2
+; SI-NEXT: s_branch .LBB95_3
;
; VI-LABEL: bitcast_v4bf16_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB95_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB95_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB95_4
-; VI-NEXT: .LBB95_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB95_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB95_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -13052,8 +13426,6 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; VI-NEXT: v_alignbit_b32 v1, v0, v1, 16
; VI-NEXT: v_alignbit_b32 v0, v3, v2, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB95_3:
-; VI-NEXT: s_branch .LBB95_2
; VI-NEXT: .LBB95_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -13063,10 +13435,14 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB95_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB95_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB95_4
-; GFX9-NEXT: .LBB95_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB95_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB95_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s17
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -13106,8 +13482,6 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_or_b32 v0, v3, v4, v0
; GFX9-NEXT: v_and_or_b32 v1, v1, v4, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB95_3:
-; GFX9-NEXT: s_branch .LBB95_2
; GFX9-NEXT: .LBB95_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13117,12 +13491,15 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB95_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB95_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB95_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB95_4
-; GFX11-NEXT: .LBB95_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s1
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -13162,8 +13539,6 @@ define inreg <4 x i16> @bitcast_v4bf16_to_v4i16_scalar(<4 x bfloat> inreg %a, i3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB95_3:
-; GFX11-NEXT: s_branch .LBB95_2
; GFX11-NEXT: .LBB95_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -13427,6 +13802,7 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB97_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -13477,12 +13853,15 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB97_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB97_2
+; SI-NEXT: s_branch .LBB97_3
;
; VI-LABEL: bitcast_v4i16_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB97_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -13528,12 +13907,15 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: ; implicit-def: $sgpr10
-; VI-NEXT: s_branch .LBB97_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB97_2
+; VI-NEXT: s_branch .LBB97_3
;
; GFX9-LABEL: bitcast_v4i16_to_v8i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB97_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -13560,7 +13942,8 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX9-NEXT: ; implicit-def: $sgpr9
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB97_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB97_2
; GFX9-NEXT: .LBB97_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
@@ -13579,17 +13962,16 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB97_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
-; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
-; GFX11-NEXT: s_lshr_b32 s8, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB97_4
+; GFX11-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-NEXT: s_lshr_b32 s5, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB97_4
; GFX11-NEXT: .LBB97_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v9, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v8, s0, 3 op_sel_hi:[1,0]
@@ -13602,17 +13984,18 @@ define inreg <8 x i8> @bitcast_v4i16_to_v8i8_scalar(<4 x i16> inreg %a, i32 inre
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
; GFX11-NEXT: s_branch .LBB97_5
; GFX11-NEXT: .LBB97_3:
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr5
+; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB97_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB97_2
; GFX11-NEXT: .LBB97_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s7 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s5 :: v_dual_mov_b32 v6, s4
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB97_5: ; %end
@@ -14013,6 +14396,7 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB99_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s20, 0xff
@@ -14077,12 +14461,15 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr8
-; SI-NEXT: s_branch .LBB99_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB99_2
+; SI-NEXT: s_branch .LBB99_3
;
; VI-LABEL: bitcast_v8i8_to_v4i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB99_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -14137,12 +14524,15 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB99_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB99_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB99_2
+; VI-NEXT: s_branch .LBB99_3
;
; GFX9-LABEL: bitcast_v8i8_to_v4i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB99_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -14197,35 +14587,36 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB99_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB99_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB99_2
+; GFX9-NEXT: s_branch .LBB99_3
;
; GFX11-LABEL: bitcast_v8i8_to_v4i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB99_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB99_3
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB99_3
; GFX11-NEXT: .LBB99_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
@@ -14259,7 +14650,9 @@ define inreg <4 x i16> @bitcast_v8i8_to_v4i16_scalar(<8 x i8> inreg %a, i32 inre
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB99_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB99_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_cbranch_vccz .LBB99_2
+; GFX11-NEXT: s_branch .LBB99_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14404,6 +14797,7 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v6, s18
; SI-NEXT: v_cvt_f16_f32_e32 v7, s19
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB101_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v4
@@ -14435,16 +14829,22 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB101_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB101_2
+; SI-NEXT: s_branch .LBB101_3
;
; VI-LABEL: bitcast_v4f16_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB101_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB101_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB101_4
-; VI-NEXT: .LBB101_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB101_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB101_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -14457,8 +14857,6 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v1, v0
; VI-NEXT: v_or_b32_e32 v0, v2, v3
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB101_3:
-; VI-NEXT: s_branch .LBB101_2
; VI-NEXT: .LBB101_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14468,16 +14866,18 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB101_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB101_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB101_4
-; GFX9-NEXT: .LBB101_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB101_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB101_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB101_3:
-; GFX9-NEXT: s_branch .LBB101_2
; GFX9-NEXT: .LBB101_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14487,17 +14887,18 @@ define inreg <4 x bfloat> @bitcast_v4f16_to_v4bf16_scalar(<4 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB101_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB101_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB101_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB101_4
-; GFX11-NEXT: .LBB101_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB101_3:
-; GFX11-NEXT: s_branch .LBB101_2
; GFX11-NEXT: .LBB101_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -14797,6 +15198,7 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v5, 1.0, s17
; SI-NEXT: v_mul_f32_e64 v6, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v7, 1.0, s19
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB103_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v4
@@ -14832,16 +15234,22 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB103_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB103_2
+; SI-NEXT: s_branch .LBB103_3
;
; VI-LABEL: bitcast_v4bf16_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
-; VI-NEXT: s_cbranch_scc0 .LBB103_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB103_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB103_4
-; VI-NEXT: .LBB103_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB103_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB103_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -14880,8 +15288,6 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; VI-NEXT: v_alignbit_b32 v1, v0, v1, 16
; VI-NEXT: v_alignbit_b32 v0, v3, v2, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB103_3:
-; VI-NEXT: s_branch .LBB103_2
; VI-NEXT: .LBB103_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -14891,10 +15297,14 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB103_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB103_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB103_4
-; GFX9-NEXT: .LBB103_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB103_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB103_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s17
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -14936,8 +15346,6 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX9-NEXT: v_lshl_or_b32 v0, v3, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v1, v1, 16, v2
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB103_3:
-; GFX9-NEXT: s_branch .LBB103_2
; GFX9-NEXT: .LBB103_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -14947,12 +15355,15 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-NEXT: s_mov_b32 s2, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB103_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s2, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB103_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB103_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s2
; GFX11-NEXT: s_cbranch_vccnz .LBB103_4
-; GFX11-NEXT: .LBB103_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s1
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s2
@@ -14997,8 +15408,6 @@ define inreg <4 x half> @bitcast_v4bf16_to_v4f16_scalar(<4 x bfloat> inreg %a, i
; GFX11-NEXT: v_lshl_or_b32 v1, v4, 16, v1
; GFX11-NEXT: v_lshl_or_b32 v0, v0, 16, v2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB103_3:
-; GFX11-NEXT: s_branch .LBB103_2
; GFX11-NEXT: .LBB103_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
@@ -15267,6 +15676,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; SI-NEXT: v_cvt_f16_f32_e32 v6, s19
; SI-NEXT: v_cvt_f16_f32_e32 v8, s18
; SI-NEXT: s_cmp_lg_u32 s20, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB105_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v10
@@ -15311,12 +15721,15 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB105_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB105_2
+; SI-NEXT: s_branch .LBB105_3
;
; VI-LABEL: bitcast_v4f16_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB105_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -15351,7 +15764,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: s_branch .LBB105_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB105_2
; VI-NEXT: .LBB105_4:
; VI-NEXT: v_mov_b32_e32 v2, s11
; VI-NEXT: v_mov_b32_e32 v6, s10
@@ -15367,6 +15781,7 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB105_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -15394,7 +15809,8 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX9-NEXT: ; implicit-def: $sgpr9
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: ; implicit-def: $sgpr5
-; GFX9-NEXT: s_branch .LBB105_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB105_2
; GFX9-NEXT: .LBB105_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s16
; GFX9-NEXT: v_mov_b32_e32 v9, s17
@@ -15413,17 +15829,16 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB105_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s3, s1, 24
-; GFX11-NEXT: s_lshr_b32 s5, s1, 16
-; GFX11-NEXT: s_lshr_b32 s6, s1, 8
-; GFX11-NEXT: s_lshr_b32 s7, s0, 16
-; GFX11-NEXT: s_lshr_b32 s8, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB105_4
+; GFX11-NEXT: s_lshr_b32 s4, s1, 16
+; GFX11-NEXT: s_lshr_b32 s5, s1, 8
+; GFX11-NEXT: s_lshr_b32 s6, s0, 16
+; GFX11-NEXT: s_lshr_b32 s7, s0, 8
+; GFX11-NEXT: s_cbranch_execnz .LBB105_4
; GFX11-NEXT: .LBB105_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v9, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v8, 0x200, s0 op_sel_hi:[0,1]
@@ -15436,17 +15851,18 @@ define inreg <8 x i8> @bitcast_v4f16_to_v8i8_scalar(<4 x half> inreg %a, i32 inr
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v8
; GFX11-NEXT: s_branch .LBB105_5
; GFX11-NEXT: .LBB105_3:
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr6
+; GFX11-NEXT: ; implicit-def: $sgpr2
; GFX11-NEXT: ; implicit-def: $sgpr5
+; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: s_branch .LBB105_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB105_2
; GFX11-NEXT: .LBB105_4:
; GFX11-NEXT: v_dual_mov_b32 v8, s0 :: v_dual_mov_b32 v9, s1
-; GFX11-NEXT: v_dual_mov_b32 v1, s8 :: v_dual_mov_b32 v2, s7
-; GFX11-NEXT: v_dual_mov_b32 v5, s6 :: v_dual_mov_b32 v6, s5
+; GFX11-NEXT: v_dual_mov_b32 v1, s7 :: v_dual_mov_b32 v2, s6
+; GFX11-NEXT: v_dual_mov_b32 v5, s5 :: v_dual_mov_b32 v6, s4
; GFX11-NEXT: v_mov_b32_e32 v7, s3
; GFX11-NEXT: v_mov_b32_e32 v3, s2
; GFX11-NEXT: .LBB105_5: ; %end
@@ -15839,6 +16255,7 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB107_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -15890,12 +16307,15 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
-; SI-NEXT: s_branch .LBB107_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB107_2
+; SI-NEXT: s_branch .LBB107_3
;
; VI-LABEL: bitcast_v8i8_to_v4f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB107_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -15950,12 +16370,15 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB107_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB107_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB107_2
+; VI-NEXT: s_branch .LBB107_3
;
; GFX9-LABEL: bitcast_v8i8_to_v4f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB107_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -16010,35 +16433,36 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB107_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB107_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB107_2
+; GFX9-NEXT: s_branch .LBB107_3
;
; GFX11-LABEL: bitcast_v8i8_to_v4f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB107_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB107_3
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB107_3
; GFX11-NEXT: .LBB107_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
@@ -16072,7 +16496,9 @@ define inreg <4 x half> @bitcast_v8i8_to_v4f16_scalar(<8 x i8> inreg %a, i32 inr
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB107_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB107_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_cbranch_vccz .LBB107_2
+; GFX11-NEXT: s_branch .LBB107_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16475,6 +16901,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s16
; SI-NEXT: v_mul_f32_e64 v8, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB109_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v10
@@ -16516,12 +16943,15 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_branch .LBB109_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB109_2
+; SI-NEXT: s_branch .LBB109_3
;
; VI-LABEL: bitcast_v4bf16_to_v8i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s18, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB109_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -16585,7 +17015,8 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr9
; VI-NEXT: ; implicit-def: $sgpr5
; VI-NEXT: ; implicit-def: $sgpr8
-; VI-NEXT: s_branch .LBB109_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB109_2
; VI-NEXT: .LBB109_4:
; VI-NEXT: v_mov_b32_e32 v1, s11
; VI-NEXT: v_mov_b32_e32 v2, s10
@@ -16601,6 +17032,7 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s18, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB109_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b64 s[4:5], s[16:17], 24
@@ -16666,7 +17098,8 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr9
-; GFX9-NEXT: s_branch .LBB109_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB109_2
; GFX9-NEXT: .LBB109_4:
; GFX9-NEXT: v_mov_b32_e32 v6, s11
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -16682,17 +17115,16 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s2, 0
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB109_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b64 s[2:3], s[0:1], 24
-; GFX11-NEXT: s_lshr_b32 s6, s1, 24
-; GFX11-NEXT: s_lshr_b32 s8, s1, 16
-; GFX11-NEXT: s_lshr_b32 s7, s1, 8
-; GFX11-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-NEXT: s_lshr_b32 s5, s1, 24
+; GFX11-NEXT: s_lshr_b32 s7, s1, 16
+; GFX11-NEXT: s_lshr_b32 s6, s1, 8
+; GFX11-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-NEXT: s_lshr_b32 s3, s0, 8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB109_4
+; GFX11-NEXT: s_cbranch_execnz .LBB109_4
; GFX11-NEXT: .LBB109_2: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s2, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
@@ -16748,16 +17180,17 @@ define inreg <8 x i8> @bitcast_v4bf16_to_v8i8_scalar(<4 x bfloat> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB109_3:
; GFX11-NEXT: ; implicit-def: $sgpr3
-; GFX11-NEXT: ; implicit-def: $sgpr5
+; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr2
-; GFX11-NEXT: ; implicit-def: $sgpr7
-; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB109_2
+; GFX11-NEXT: ; implicit-def: $sgpr7
+; GFX11-NEXT: ; implicit-def: $sgpr5
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB109_2
; GFX11-NEXT: .LBB109_4:
-; GFX11-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s6
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s7
-; GFX11-NEXT: v_dual_mov_b32 v2, s5 :: v_dual_mov_b32 v1, s3
+; GFX11-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s5
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v5, s6
+; GFX11-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v1, s3
; GFX11-NEXT: v_dual_mov_b32 v3, s2 :: v_dual_mov_b32 v4, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -17150,6 +17583,7 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s24, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB111_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -17211,12 +17645,15 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr9
-; SI-NEXT: s_branch .LBB111_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB111_2
+; SI-NEXT: s_branch .LBB111_3
;
; VI-LABEL: bitcast_v8i8_to_v4bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s24, 0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_cbranch_scc0 .LBB111_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -17271,12 +17708,15 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB111_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB111_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccz .LBB111_2
+; VI-NEXT: s_branch .LBB111_3
;
; GFX9-LABEL: bitcast_v8i8_to_v4bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s24, 0
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB111_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -17331,35 +17771,36 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB111_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX9-NEXT: s_branch .LBB111_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-NEXT: s_cbranch_vccz .LBB111_2
+; GFX9-NEXT: s_branch .LBB111_3
;
; GFX11-LABEL: bitcast_v8i8_to_v4bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s20, 0
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s6, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB111_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
-; GFX11-NEXT: s_and_b32 s7, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_and_b32 s6, s2, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_b32 s7, s16, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s17, 8
-; GFX11-NEXT: s_and_b32 s9, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s10, s19, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s8
-; GFX11-NEXT: s_or_b32 s8, s9, s10
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_and_b32 s6, s16, 0xff
+; GFX11-NEXT: s_lshl_b32 s7, s17, 8
+; GFX11-NEXT: s_and_b32 s8, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_or_b32 s6, s6, s7
+; GFX11-NEXT: s_or_b32 s7, s8, s9
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
-; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s8, s8, 16
+; GFX11-NEXT: s_and_b32 s6, s6, 0xffff
+; GFX11-NEXT: s_lshl_b32 s7, s7, 16
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s7, s8
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
-; GFX11-NEXT: s_cbranch_vccnz .LBB111_3
+; GFX11-NEXT: s_or_b32 s5, s6, s7
+; GFX11-NEXT: s_cbranch_execnz .LBB111_3
; GFX11-NEXT: .LBB111_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_lshl_b32 s1, s1, 8
@@ -17393,7 +17834,9 @@ define inreg <4 x bfloat> @bitcast_v8i8_to_v4bf16_scalar(<8 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB111_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB111_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_cbranch_vccz .LBB111_2
+; GFX11-NEXT: s_branch .LBB111_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
index 9f5c9c4..10baf25 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.704bit.ll
@@ -170,6 +170,7 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -190,13 +191,16 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19
@@ -219,16 +223,15 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v22i32_to_v22f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -249,13 +252,16 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v21, vcc, 3, v21
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
; VI-NEXT: v_add_u32_e32 v19, vcc, 3, v19
@@ -278,16 +284,15 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v22i32_to_v22f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -308,13 +313,16 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
@@ -337,38 +345,36 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v22i32_to_v22f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
@@ -391,6 +397,7 @@ define inreg <22 x float> @bitcast_v22i32_to_v22f32_scalar(<22 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -562,6 +569,7 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -582,13 +590,16 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v21, 1.0, v21
; SI-NEXT: v_add_f32_e32 v20, 1.0, v20
; SI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -611,16 +622,15 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v22f32_to_v22i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -641,13 +651,16 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -670,16 +683,15 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v22f32_to_v22i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -700,13 +712,16 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -729,38 +744,36 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v22f32_to_v22i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB3_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: .LBB3_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
@@ -772,6 +785,7 @@ define inreg <22 x i32> @bitcast_v22f32_to_v22i32_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -954,6 +968,7 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -974,13 +989,16 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19
@@ -1003,16 +1021,15 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v22i32_to_v11i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -1033,13 +1050,16 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v21, vcc, 3, v21
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
; VI-NEXT: v_add_u32_e32 v19, vcc, 3, v19
@@ -1062,16 +1082,15 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v22i32_to_v11i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -1092,13 +1111,16 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
@@ -1121,38 +1143,36 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v22i32_to_v11i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
@@ -1175,6 +1195,7 @@ define inreg <11 x i64> @bitcast_v22i32_to_v11i64_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1363,6 +1384,7 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -1383,13 +1405,16 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
; SI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
@@ -1412,16 +1437,15 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v11i64_to_v22i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -1442,13 +1466,16 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
; VI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
@@ -1471,16 +1498,15 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v11i64_to_v22i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -1501,13 +1527,16 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v20, vcc, 3, v20
; GFX9-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v21, vcc
; GFX9-NEXT: v_add_co_u32_e32 v18, vcc, 3, v18
@@ -1530,38 +1559,36 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v11i64_to_v22i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB7_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: .LBB7_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
@@ -1590,6 +1617,7 @@ define inreg <22 x i32> @bitcast_v11i64_to_v22i32_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1772,6 +1800,7 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -1792,13 +1821,16 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
; SI-NEXT: v_add_i32_e32 v19, vcc, 3, v19
@@ -1821,16 +1853,15 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v22i32_to_v11f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -1851,13 +1882,16 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v21, vcc, 3, v21
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
; VI-NEXT: v_add_u32_e32 v19, vcc, 3, v19
@@ -1880,16 +1914,15 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v22i32_to_v11f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -1910,13 +1943,16 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
; GFX9-NEXT: v_add_u32_e32 v20, 3, v20
; GFX9-NEXT: v_add_u32_e32 v19, 3, v19
@@ -1939,38 +1975,36 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v22i32_to_v11f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
; GFX11-NEXT: v_add_nc_u32_e32 v20, 3, v20
; GFX11-NEXT: v_add_nc_u32_e32 v19, 3, v19
@@ -1993,6 +2027,7 @@ define inreg <11 x double> @bitcast_v22i32_to_v11f64_scalar(<22 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2131,6 +2166,7 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v10, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -2151,13 +2187,16 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -2169,16 +2208,15 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v11f64_to_v22i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v10, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -2199,13 +2237,16 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -2217,16 +2258,15 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v11f64_to_v22i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v10, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -2247,13 +2287,16 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -2265,38 +2308,36 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v11f64_to_v22i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB11_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: .LBB11_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -2308,6 +2349,7 @@ define inreg <22 x i32> @bitcast_v11f64_to_v22i32_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3022,6 +3064,7 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s13, v1
; SI-NEXT: v_readfirstlane_b32 s12, v2
; SI-NEXT: v_readfirstlane_b32 s11, v3
@@ -3029,8 +3072,8 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v5
; SI-NEXT: v_readfirstlane_b32 s8, v6
; SI-NEXT: v_readfirstlane_b32 s7, v7
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v8
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -3280,25 +3323,28 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr15
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v22i32_to_v44i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s13, v0
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: v_readfirstlane_b32 s11, v2
; VI-NEXT: v_readfirstlane_b32 s10, v3
; VI-NEXT: v_readfirstlane_b32 s9, v4
; VI-NEXT: v_readfirstlane_b32 s8, v5
-; VI-NEXT: v_readfirstlane_b32 s6, v6
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v7
+; VI-NEXT: v_readfirstlane_b32 s7, v6
+; VI-NEXT: v_readfirstlane_b32 s6, v7
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -3321,8 +3367,8 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: s_lshr_b32 s75, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -3343,8 +3389,8 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -3425,13 +3471,13 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: s_or_b32 s9, s9, s28
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s40, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s15, s15, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_or_b32 s6, s6, s15
-; VI-NEXT: s_or_b32 s7, s7, s14
+; VI-NEXT: s_or_b32 s7, s7, s15
+; VI-NEXT: s_or_b32 s6, s6, s14
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -3452,8 +3498,8 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v17, s10
; VI-NEXT: v_mov_b32_e32 v18, s9
; VI-NEXT: v_mov_b32_e32 v19, s8
-; VI-NEXT: v_mov_b32_e32 v20, s6
-; VI-NEXT: v_mov_b32_e32 v21, s7
+; VI-NEXT: v_mov_b32_e32 v20, s7
+; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_4:
; VI-NEXT: ; implicit-def: $sgpr75
@@ -3478,31 +3524,34 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr40
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v22i32_to_v44i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s6, v7
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -3519,6 +3568,7 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s13, s13, 3
; GFX9-NEXT: s_add_i32 s12, s12, 3
; GFX9-NEXT: s_add_i32 s11, s11, 3
@@ -3526,7 +3576,6 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -3541,14 +3590,14 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -3578,14 +3627,14 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s56
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s47
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s14
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -3600,14 +3649,14 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: ; implicit-def: $sgpr75
@@ -3632,23 +3681,25 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr40
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v22i32_to_v44i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-NEXT: v_readfirstlane_b32 s6, v1
; GFX11-NEXT: v_readfirstlane_b32 s7, v2
-; GFX11-NEXT: v_readfirstlane_b32 s6, v3
-; GFX11-NEXT: s_mov_b32 s62, 0
+; GFX11-NEXT: v_readfirstlane_b32 s5, v3
+; GFX11-NEXT: s_mov_b32 s62, -1
; GFX11-NEXT: s_and_b32 s8, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -3668,12 +3719,11 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s59, s2, 16
; GFX11-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-NEXT: s_lshr_b32 s61, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s6, s6, 3
-; GFX11-NEXT: s_add_i32 s7, s7, 3
; GFX11-NEXT: s_add_i32 s5, s5, 3
+; GFX11-NEXT: s_add_i32 s7, s7, 3
+; GFX11-NEXT: s_add_i32 s6, s6, 3
; GFX11-NEXT: s_add_i32 s4, s4, 3
; GFX11-NEXT: s_add_i32 s29, s29, 3
; GFX11-NEXT: s_add_i32 s28, s28, 3
@@ -3693,9 +3743,9 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -3736,9 +3786,9 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s28, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s29, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s8
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3748,8 +3798,8 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s14
; GFX11-NEXT: v_dual_mov_b32 v16, s13 :: v_dual_mov_b32 v17, s12
-; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
+; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s6
+; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s5
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: ; implicit-def: $sgpr61
@@ -3774,7 +3824,9 @@ define inreg <44 x i16> @bitcast_v22i32_to_v44i16_scalar(<22 x i32> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4744,6 +4796,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -4773,7 +4826,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v52, v4
; SI-NEXT: v_mov_b32_e32 v53, v2
; SI-NEXT: v_mov_b32_e32 v54, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v3
@@ -4973,7 +5026,9 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v44i16_to_v22i32_scalar:
; VI: ; %bb.0:
@@ -4993,6 +5048,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -5001,7 +5057,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB15_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -5151,21 +5207,22 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -5180,7 +5237,6 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -5200,19 +5256,13 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v44i16_to_v22i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -5228,6 +5278,15 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -5236,7 +5295,6 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -5251,6 +5309,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB15_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -5286,23 +5345,27 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v53, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; GFX9-NEXT: v_lshl_or_b32 v0, v52, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v38
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v1, v54, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v53, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v52, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v51, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v50, 16, v19
+; GFX9-NEXT: v_lshl_or_b32 v0, v50, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v20, v49, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v48, 16, v21
-; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -5317,17 +5380,15 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB15_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -5345,9 +5406,9 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -5359,15 +5420,14 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -5379,10 +5439,11 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -5398,8 +5459,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -5431,7 +5491,9 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-TRUE16-NEXT: s_branch .LBB15_3
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -5445,9 +5507,9 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -5459,15 +5521,14 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -5479,10 +5540,11 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -5498,8 +5560,7 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -5531,7 +5592,9 @@ define inreg <22 x i32> @bitcast_v44i16_to_v22i32_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-FAKE16-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -6451,24 +6514,25 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s13, v1
; SI-NEXT: v_readfirstlane_b32 s12, v2
; SI-NEXT: v_readfirstlane_b32 s11, v3
; SI-NEXT: v_readfirstlane_b32 s10, v4
-; SI-NEXT: v_readfirstlane_b32 s8, v5
-; SI-NEXT: v_readfirstlane_b32 s7, v6
-; SI-NEXT: v_readfirstlane_b32 s6, v7
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v8
+; SI-NEXT: v_readfirstlane_b32 s9, v5
+; SI-NEXT: v_readfirstlane_b32 s8, v6
+; SI-NEXT: v_readfirstlane_b32 s7, v7
+; SI-NEXT: v_readfirstlane_b32 s6, v8
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -6506,10 +6570,10 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v50, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v52, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -6548,10 +6612,10 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s12, s12, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s10, s10, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s14, s18, 16
@@ -6570,14 +6634,14 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s59, s12, 16
; SI-NEXT: s_lshr_b32 s60, s11, 16
; SI-NEXT: s_lshr_b32 s61, s10, 16
-; SI-NEXT: s_lshr_b32 s62, s8, 16
-; SI-NEXT: s_lshr_b32 s63, s7, 16
-; SI-NEXT: s_lshr_b32 s72, s6, 16
-; SI-NEXT: s_lshr_b32 s73, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: s_lshr_b32 s62, s9, 16
+; SI-NEXT: s_lshr_b32 s63, s8, 16
+; SI-NEXT: s_lshr_b32 s72, s7, 16
+; SI-NEXT: s_lshr_b32 s73, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -6818,25 +6882,28 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v22i32_to_v44f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s13, v0
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: v_readfirstlane_b32 s11, v2
; VI-NEXT: v_readfirstlane_b32 s10, v3
; VI-NEXT: v_readfirstlane_b32 s9, v4
; VI-NEXT: v_readfirstlane_b32 s8, v5
-; VI-NEXT: v_readfirstlane_b32 s6, v6
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v7
+; VI-NEXT: v_readfirstlane_b32 s7, v6
+; VI-NEXT: v_readfirstlane_b32 s6, v7
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB17_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -6859,8 +6926,8 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; VI-NEXT: s_lshr_b32 s75, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -6881,8 +6948,8 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -6963,13 +7030,13 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; VI-NEXT: s_or_b32 s9, s9, s28
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s40, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s15, s15, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_or_b32 s6, s6, s15
-; VI-NEXT: s_or_b32 s7, s7, s14
+; VI-NEXT: s_or_b32 s7, s7, s15
+; VI-NEXT: s_or_b32 s6, s6, s14
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -6990,8 +7057,8 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v17, s10
; VI-NEXT: v_mov_b32_e32 v18, s9
; VI-NEXT: v_mov_b32_e32 v19, s8
-; VI-NEXT: v_mov_b32_e32 v20, s6
-; VI-NEXT: v_mov_b32_e32 v21, s7
+; VI-NEXT: v_mov_b32_e32 v20, s7
+; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_4:
; VI-NEXT: ; implicit-def: $sgpr75
@@ -7016,31 +7083,34 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr40
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB17_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB17_2
+; VI-NEXT: s_branch .LBB17_3
;
; GFX9-LABEL: bitcast_v22i32_to_v44f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s6, v7
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -7057,6 +7127,7 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s13, s13, 3
; GFX9-NEXT: s_add_i32 s12, s12, 3
; GFX9-NEXT: s_add_i32 s11, s11, 3
@@ -7064,7 +7135,6 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -7079,14 +7149,14 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -7116,14 +7186,14 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s56
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s47
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s14
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -7138,14 +7208,14 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: ; implicit-def: $sgpr75
@@ -7170,23 +7240,25 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr40
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB17_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB17_2
+; GFX9-NEXT: s_branch .LBB17_3
;
; GFX11-LABEL: bitcast_v22i32_to_v44f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-NEXT: v_readfirstlane_b32 s6, v1
; GFX11-NEXT: v_readfirstlane_b32 s7, v2
-; GFX11-NEXT: v_readfirstlane_b32 s6, v3
-; GFX11-NEXT: s_mov_b32 s62, 0
+; GFX11-NEXT: v_readfirstlane_b32 s5, v3
+; GFX11-NEXT: s_mov_b32 s62, -1
; GFX11-NEXT: s_and_b32 s8, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -7206,12 +7278,11 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s59, s2, 16
; GFX11-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-NEXT: s_lshr_b32 s61, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
+; GFX11-NEXT: s_cbranch_execnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s6, s6, 3
-; GFX11-NEXT: s_add_i32 s7, s7, 3
; GFX11-NEXT: s_add_i32 s5, s5, 3
+; GFX11-NEXT: s_add_i32 s7, s7, 3
+; GFX11-NEXT: s_add_i32 s6, s6, 3
; GFX11-NEXT: s_add_i32 s4, s4, 3
; GFX11-NEXT: s_add_i32 s29, s29, 3
; GFX11-NEXT: s_add_i32 s28, s28, 3
@@ -7231,9 +7302,9 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -7274,9 +7345,9 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s28, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s29, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s8
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7286,8 +7357,8 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s14
; GFX11-NEXT: v_dual_mov_b32 v16, s13 :: v_dual_mov_b32 v17, s12
-; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
+; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s6
+; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s5
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: ; implicit-def: $sgpr61
@@ -7312,7 +7383,9 @@ define inreg <44 x half> @bitcast_v22i32_to_v44f16_scalar(<22 x i32> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB17_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
+; GFX11-NEXT: s_cbranch_vccz .LBB17_2
+; GFX11-NEXT: s_branch .LBB17_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8477,6 +8550,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v26, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
@@ -8815,7 +8889,9 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v44f16_to_v22i32_scalar:
; VI: ; %bb.0:
@@ -8835,6 +8911,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -8843,7 +8920,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB19_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -9007,19 +9084,13 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB19_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB19_2
+; VI-NEXT: s_branch .LBB19_3
;
; GFX9-LABEL: bitcast_v44f16_to_v22i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -9035,6 +9106,15 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -9043,7 +9123,6 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -9058,6 +9137,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -9136,7 +9216,9 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB19_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB19_2
+; GFX9-NEXT: s_branch .LBB19_3
;
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -9154,9 +9236,9 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -9168,15 +9250,14 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -9188,10 +9269,11 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -9207,8 +9289,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -9240,7 +9321,9 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-TRUE16-NEXT: s_branch .LBB19_3
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -9254,9 +9337,9 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -9268,15 +9351,14 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -9288,10 +9370,11 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -9307,8 +9390,7 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -9340,7 +9422,9 @@ define inreg <22 x i32> @bitcast_v44f16_to_v22i32_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB19_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB19_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-FAKE16-NEXT: s_branch .LBB19_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9511,6 +9595,7 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -9531,13 +9616,16 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v21, 1.0, v21
; SI-NEXT: v_add_f32_e32 v20, 1.0, v20
; SI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -9560,16 +9648,15 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v22f32_to_v11i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -9590,13 +9677,16 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -9619,16 +9709,15 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v22f32_to_v11i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -9649,13 +9738,16 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -9678,38 +9770,36 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v22f32_to_v11i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB21_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: .LBB21_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
@@ -9721,6 +9811,7 @@ define inreg <11 x i64> @bitcast_v22f32_to_v11i64_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9909,6 +10000,7 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -9929,13 +10021,16 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
; SI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
; SI-NEXT: v_add_i32_e32 v18, vcc, 3, v18
@@ -9958,16 +10053,15 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v11i64_to_v22f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -9988,13 +10082,16 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
; VI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
; VI-NEXT: v_add_u32_e32 v18, vcc, 3, v18
@@ -10017,16 +10114,15 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v11i64_to_v22f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -10047,13 +10143,16 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v20, vcc, 3, v20
; GFX9-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v21, vcc
; GFX9-NEXT: v_add_co_u32_e32 v18, vcc, 3, v18
@@ -10076,38 +10175,36 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v11i64_to_v22f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB23_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: .LBB23_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
@@ -10136,6 +10233,7 @@ define inreg <22 x float> @bitcast_v11i64_to_v22f32_scalar(<11 x i64> inreg %a,
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10307,6 +10405,7 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -10327,13 +10426,16 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_3
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v21, 1.0, v21
; SI-NEXT: v_add_f32_e32 v20, 1.0, v20
; SI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -10356,16 +10458,15 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB25_3: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v22f32_to_v11f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -10386,13 +10487,16 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -10415,16 +10519,15 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v22f32_to_v11f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -10445,13 +10548,16 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -10474,38 +10580,36 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v22f32_to_v11f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB25_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: .LBB25_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
@@ -10517,6 +10621,7 @@ define inreg <11 x double> @bitcast_v22f32_to_v11f64_scalar(<22 x float> inreg %
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10655,6 +10760,7 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v10, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -10675,13 +10781,16 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -10693,16 +10802,15 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v11f64_to_v22f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v10, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -10723,13 +10831,16 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -10741,16 +10852,15 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v11f64_to_v22f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v10, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -10771,13 +10881,16 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -10789,38 +10902,36 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v11f64_to_v22f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB27_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: .LBB27_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
@@ -10832,6 +10943,7 @@ define inreg <22 x float> @bitcast_v11f64_to_v22f32_scalar(<11 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11524,6 +11636,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, s16
; SI-NEXT: v_mov_b32_e32 v22, s17
; SI-NEXT: v_mov_b32_e32 v21, s18
@@ -11535,7 +11648,7 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, s24
; SI-NEXT: v_mov_b32_e32 v12, s25
; SI-NEXT: v_mov_b32_e32 v13, s26
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v10, s28
; SI-NEXT: v_mov_b32_e32 v9, s29
@@ -11765,27 +11878,30 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr20
; SI-NEXT: ; implicit-def: $vgpr27
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v22f32_to_v44i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, s16
; VI-NEXT: v_mov_b32_e32 v21, s17
; VI-NEXT: v_mov_b32_e32 v20, s18
; VI-NEXT: v_mov_b32_e32 v19, s19
; VI-NEXT: v_mov_b32_e32 v18, s20
; VI-NEXT: v_mov_b32_e32 v17, s21
-; VI-NEXT: v_mov_b32_e32 v16, s22
+; VI-NEXT: v_mov_b32_e32 v15, s22
; VI-NEXT: v_mov_b32_e32 v12, s23
; VI-NEXT: v_mov_b32_e32 v11, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
-; VI-NEXT: v_mov_b32_e32 v14, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_mov_b32_e32 v15, s28
-; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: v_mov_b32_e32 v13, s27
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_mov_b32_e32 v16, s28
+; VI-NEXT: v_mov_b32_e32 v14, s29
; VI-NEXT: s_cbranch_scc0 .LBB29_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -11796,14 +11912,14 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -11820,14 +11936,14 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
-; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
+; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
; VI-NEXT: v_add_f32_e32 v10, 1.0, v10
; VI-NEXT: v_add_f32_e32 v9, 1.0, v9
; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
-; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -11842,14 +11958,14 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -11860,36 +11976,36 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
; VI-NEXT: v_or_b32_sdwa v24, v22, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v22, 16, v25
+; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; VI-NEXT: v_or_b32_sdwa v25, v21, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; VI-NEXT: v_or_b32_sdwa v26, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v27
; VI-NEXT: v_or_b32_sdwa v8, v11, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v50
-; VI-NEXT: v_or_b32_sdwa v27, v19, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v28
+; VI-NEXT: v_or_b32_sdwa v26, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v27
; VI-NEXT: v_or_b32_sdwa v9, v9, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v49
-; VI-NEXT: v_or_b32_sdwa v28, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; VI-NEXT: v_or_b32_sdwa v27, v19, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v28
; VI-NEXT: v_or_b32_sdwa v10, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v48
+; VI-NEXT: v_or_b32_sdwa v28, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; VI-NEXT: v_or_b32_sdwa v11, v13, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v38
; VI-NEXT: v_or_b32_sdwa v29, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v51
-; VI-NEXT: v_or_b32_sdwa v11, v14, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v38
-; VI-NEXT: v_or_b32_sdwa v22, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v23
-; VI-NEXT: v_or_b32_sdwa v13, v13, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v37
-; VI-NEXT: v_or_b32_sdwa v23, v12, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v39
+; VI-NEXT: v_or_b32_sdwa v22, v15, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v23
; VI-NEXT: v_or_b32_sdwa v14, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v36
-; VI-NEXT: v_or_b32_sdwa v12, v15, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v23, v12, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v39
; VI-NEXT: v_or_b32_sdwa v15, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
+; VI-NEXT: v_or_b32_sdwa v12, v16, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v34
; VI-NEXT: v_or_b32_sdwa v17, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -11933,27 +12049,30 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr32
; VI-NEXT: ; implicit-def: $vgpr31
; VI-NEXT: ; implicit-def: $vgpr30
-; VI-NEXT: s_branch .LBB29_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB29_2
+; VI-NEXT: s_branch .LBB29_3
;
; GFX9-LABEL: bitcast_v22f32_to_v44i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, s16
; GFX9-NEXT: v_mov_b32_e32 v21, s17
; GFX9-NEXT: v_mov_b32_e32 v20, s18
; GFX9-NEXT: v_mov_b32_e32 v19, s19
; GFX9-NEXT: v_mov_b32_e32 v18, s20
; GFX9-NEXT: v_mov_b32_e32 v17, s21
-; GFX9-NEXT: v_mov_b32_e32 v16, s22
+; GFX9-NEXT: v_mov_b32_e32 v15, s22
; GFX9-NEXT: v_mov_b32_e32 v12, s23
; GFX9-NEXT: v_mov_b32_e32 v11, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
-; GFX9-NEXT: v_mov_b32_e32 v14, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_mov_b32_e32 v15, s28
-; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: v_mov_b32_e32 v13, s27
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_mov_b32_e32 v16, s28
+; GFX9-NEXT: v_mov_b32_e32 v14, s29
; GFX9-NEXT: s_cbranch_scc0 .LBB29_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -11964,14 +12083,14 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -11988,14 +12107,14 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
-; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
+; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
; GFX9-NEXT: v_add_f32_e32 v10, 1.0, v10
; GFX9-NEXT: v_add_f32_e32 v9, 1.0, v9
; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
-; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -12010,14 +12129,14 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -12027,20 +12146,21 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: .LBB29_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v8, v8, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v14
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v13
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v14
; GFX9-NEXT: v_lshl_or_b32 v14, v37, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX9-NEXT: v_lshl_or_b32 v23, v23, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v15
+; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v15
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX9-NEXT: v_lshl_or_b32 v15, v36, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v22
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v16
+; GFX9-NEXT: v_lshl_or_b32 v23, v23, 16, v12
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v16
; GFX9-NEXT: v_lshl_or_b32 v16, v35, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v3
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
@@ -12060,7 +12180,6 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v20
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX9-NEXT: v_lshl_or_b32 v20, v31, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
; GFX9-NEXT: v_lshl_or_b32 v9, v50, 16, v9
@@ -12101,7 +12220,9 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr32
; GFX9-NEXT: ; implicit-def: $vgpr31
; GFX9-NEXT: ; implicit-def: $vgpr30
-; GFX9-NEXT: s_branch .LBB29_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB29_2
+; GFX9-NEXT: s_branch .LBB29_3
;
; GFX11-LABEL: bitcast_v22f32_to_v44i16_scalar:
; GFX11: ; %bb.0:
@@ -12113,11 +12234,11 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
-; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v15, s28
+; GFX11-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
@@ -12125,11 +12246,11 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -12142,14 +12263,13 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-NEXT: s_cbranch_execnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
@@ -12161,11 +12281,11 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -12195,8 +12315,8 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v8
; GFX11-NEXT: v_lshl_or_b32 v7, v48, 16, v11
; GFX11-NEXT: v_lshl_or_b32 v11, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v12
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_lshl_or_b32 v5, v50, 16, v6
@@ -12205,10 +12325,10 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v8, v39, 16, v10
; GFX11-NEXT: v_lshl_or_b32 v10, v37, 16, v18
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v16
+; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v19
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -12219,9 +12339,9 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_mov_b32_e32 v3, v23
; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
; GFX11-NEXT: v_lshl_or_b32 v9, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v13
; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
@@ -12251,7 +12371,9 @@ define inreg <44 x i16> @bitcast_v22f32_to_v44i16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr28
; GFX11-NEXT: ; implicit-def: $vgpr27
; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB29_2
+; GFX11-NEXT: s_branch .LBB29_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13221,6 +13343,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -13250,7 +13373,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v52, v4
; SI-NEXT: v_mov_b32_e32 v53, v2
; SI-NEXT: v_mov_b32_e32 v54, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v3
@@ -13450,7 +13573,9 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v44i16_to_v22f32_scalar:
; VI: ; %bb.0:
@@ -13470,6 +13595,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -13478,7 +13604,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB31_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -13628,21 +13754,22 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -13657,7 +13784,6 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -13677,19 +13803,13 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB31_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB31_2
+; VI-NEXT: s_branch .LBB31_3
;
; GFX9-LABEL: bitcast_v44i16_to_v22f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -13705,6 +13825,15 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -13713,7 +13842,6 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -13728,6 +13856,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -13763,23 +13892,27 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v53, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; GFX9-NEXT: v_lshl_or_b32 v0, v52, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v38
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v1, v54, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v53, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v52, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v51, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v50, 16, v19
+; GFX9-NEXT: v_lshl_or_b32 v0, v50, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v20, v49, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v48, 16, v21
-; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -13794,17 +13927,15 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB31_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB31_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB31_2
+; GFX9-NEXT: s_branch .LBB31_3
;
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v22f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -13822,9 +13953,9 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -13836,15 +13967,14 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -13856,10 +13986,11 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -13875,8 +14006,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -13908,7 +14038,9 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB31_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-TRUE16-NEXT: s_branch .LBB31_3
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v22f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -13922,9 +14054,9 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -13936,15 +14068,14 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -13956,10 +14087,11 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -13975,8 +14107,7 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-FAKE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -14008,7 +14139,9 @@ define inreg <22 x float> @bitcast_v44i16_to_v22f32_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB31_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB31_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-FAKE16-NEXT: s_branch .LBB31_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14906,15 +15039,16 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s13, v1
; SI-NEXT: v_readfirstlane_b32 s12, v2
; SI-NEXT: v_readfirstlane_b32 s11, v3
; SI-NEXT: v_readfirstlane_b32 s10, v4
-; SI-NEXT: v_readfirstlane_b32 s8, v5
-; SI-NEXT: v_readfirstlane_b32 s7, v6
-; SI-NEXT: v_readfirstlane_b32 s6, v7
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v8
+; SI-NEXT: v_readfirstlane_b32 s9, v5
+; SI-NEXT: v_readfirstlane_b32 s8, v6
+; SI-NEXT: v_readfirstlane_b32 s7, v7
+; SI-NEXT: v_readfirstlane_b32 s6, v8
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -14925,13 +15059,13 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -14969,10 +15103,10 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v50, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v52, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v5, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -15011,10 +15145,10 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v15, s12, 1.0
; SI-NEXT: v_add_f32_e64 v13, s11, 1.0
; SI-NEXT: v_add_f32_e64 v11, s10, 1.0
-; SI-NEXT: v_add_f32_e64 v9, s8, 1.0
-; SI-NEXT: v_add_f32_e64 v7, s7, 1.0
-; SI-NEXT: v_add_f32_e64 v5, s6, 1.0
-; SI-NEXT: v_add_f32_e64 v3, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v9, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v7, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v5, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v3, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v48, 16, v4
@@ -15296,27 +15430,30 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v22f32_to_v44f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, s16
; VI-NEXT: v_mov_b32_e32 v21, s17
; VI-NEXT: v_mov_b32_e32 v20, s18
; VI-NEXT: v_mov_b32_e32 v19, s19
; VI-NEXT: v_mov_b32_e32 v18, s20
; VI-NEXT: v_mov_b32_e32 v17, s21
-; VI-NEXT: v_mov_b32_e32 v16, s22
+; VI-NEXT: v_mov_b32_e32 v15, s22
; VI-NEXT: v_mov_b32_e32 v12, s23
; VI-NEXT: v_mov_b32_e32 v11, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
-; VI-NEXT: v_mov_b32_e32 v14, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_mov_b32_e32 v15, s28
-; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: v_mov_b32_e32 v13, s27
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_mov_b32_e32 v16, s28
+; VI-NEXT: v_mov_b32_e32 v14, s29
; VI-NEXT: s_cbranch_scc0 .LBB33_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -15327,14 +15464,14 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -15351,14 +15488,14 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
-; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
+; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
; VI-NEXT: v_add_f32_e32 v10, 1.0, v10
; VI-NEXT: v_add_f32_e32 v9, 1.0, v9
; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
-; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -15373,14 +15510,14 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -15391,36 +15528,36 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
; VI-NEXT: v_or_b32_sdwa v24, v22, v24 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v22, 16, v25
+; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; VI-NEXT: v_or_b32_sdwa v25, v21, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
-; VI-NEXT: v_or_b32_sdwa v26, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v27
; VI-NEXT: v_or_b32_sdwa v8, v11, v8 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v50
-; VI-NEXT: v_or_b32_sdwa v27, v19, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v28
+; VI-NEXT: v_or_b32_sdwa v26, v20, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v27
; VI-NEXT: v_or_b32_sdwa v9, v9, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v49
-; VI-NEXT: v_or_b32_sdwa v28, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; VI-NEXT: v_or_b32_sdwa v27, v19, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v28
; VI-NEXT: v_or_b32_sdwa v10, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v48
+; VI-NEXT: v_or_b32_sdwa v28, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v29
+; VI-NEXT: v_or_b32_sdwa v11, v13, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v38
; VI-NEXT: v_or_b32_sdwa v29, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v51
-; VI-NEXT: v_or_b32_sdwa v11, v14, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v38
-; VI-NEXT: v_or_b32_sdwa v22, v16, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v23
-; VI-NEXT: v_or_b32_sdwa v13, v13, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v37
-; VI-NEXT: v_or_b32_sdwa v23, v12, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v39
+; VI-NEXT: v_or_b32_sdwa v22, v15, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v23
; VI-NEXT: v_or_b32_sdwa v14, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v36
-; VI-NEXT: v_or_b32_sdwa v12, v15, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v23, v12, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v39
; VI-NEXT: v_or_b32_sdwa v15, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
+; VI-NEXT: v_or_b32_sdwa v12, v16, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v34
; VI-NEXT: v_or_b32_sdwa v17, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -15464,27 +15601,30 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr32
; VI-NEXT: ; implicit-def: $vgpr31
; VI-NEXT: ; implicit-def: $vgpr30
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
+; VI-NEXT: s_branch .LBB33_3
;
; GFX9-LABEL: bitcast_v22f32_to_v44f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, s16
; GFX9-NEXT: v_mov_b32_e32 v21, s17
; GFX9-NEXT: v_mov_b32_e32 v20, s18
; GFX9-NEXT: v_mov_b32_e32 v19, s19
; GFX9-NEXT: v_mov_b32_e32 v18, s20
; GFX9-NEXT: v_mov_b32_e32 v17, s21
-; GFX9-NEXT: v_mov_b32_e32 v16, s22
+; GFX9-NEXT: v_mov_b32_e32 v15, s22
; GFX9-NEXT: v_mov_b32_e32 v12, s23
; GFX9-NEXT: v_mov_b32_e32 v11, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
-; GFX9-NEXT: v_mov_b32_e32 v14, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_mov_b32_e32 v15, s28
-; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: v_mov_b32_e32 v13, s27
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_mov_b32_e32 v16, s28
+; GFX9-NEXT: v_mov_b32_e32 v14, s29
; GFX9-NEXT: s_cbranch_scc0 .LBB33_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -15495,14 +15635,14 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -15519,14 +15659,14 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
-; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
+; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
; GFX9-NEXT: v_add_f32_e32 v10, 1.0, v10
; GFX9-NEXT: v_add_f32_e32 v9, 1.0, v9
; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
-; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -15541,14 +15681,14 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v8, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v19
@@ -15558,20 +15698,21 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: .LBB33_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v8, v8, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v14
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v13
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v14
; GFX9-NEXT: v_lshl_or_b32 v14, v37, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX9-NEXT: v_lshl_or_b32 v23, v23, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v15
+; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v22
+; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v15
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX9-NEXT: v_lshl_or_b32 v15, v36, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v22
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v16
+; GFX9-NEXT: v_lshl_or_b32 v23, v23, 16, v12
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v16
; GFX9-NEXT: v_lshl_or_b32 v16, v35, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v3
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
@@ -15591,7 +15732,6 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v20
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX9-NEXT: v_lshl_or_b32 v20, v31, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
; GFX9-NEXT: v_lshl_or_b32 v9, v50, 16, v9
@@ -15632,7 +15772,9 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr32
; GFX9-NEXT: ; implicit-def: $vgpr31
; GFX9-NEXT: ; implicit-def: $vgpr30
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
+; GFX9-NEXT: s_branch .LBB33_3
;
; GFX11-LABEL: bitcast_v22f32_to_v44f16_scalar:
; GFX11: ; %bb.0:
@@ -15644,11 +15786,11 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v6, s17 :: v_dual_mov_b32 v11, s19
; GFX11-NEXT: v_dual_mov_b32 v10, s20 :: v_dual_mov_b32 v9, s21
; GFX11-NEXT: v_dual_mov_b32 v8, s22 :: v_dual_mov_b32 v7, s23
-; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v14, s25
-; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v12, s27
-; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
+; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v15, s28
+; GFX11-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
@@ -15656,11 +15798,11 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -15673,14 +15815,13 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v21
; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-NEXT: s_cbranch_execnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v16, 1.0, v16
-; GFX11-NEXT: v_dual_add_f32 v12, 1.0, v12 :: v_dual_add_f32 v13, 1.0, v13
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v8, 1.0, v8
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v6, 1.0, v6
@@ -15692,11 +15833,11 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v28, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v7
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v8
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -15726,8 +15867,8 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v8
; GFX11-NEXT: v_lshl_or_b32 v7, v48, 16, v11
; GFX11-NEXT: v_lshl_or_b32 v11, v36, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v12
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-NEXT: v_lshl_or_b32 v5, v50, 16, v6
@@ -15736,10 +15877,10 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v8, v39, 16, v10
; GFX11-NEXT: v_lshl_or_b32 v10, v37, 16, v18
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v19
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
+; GFX11-NEXT: v_lshl_or_b32 v15, v32, 16, v16
+; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v19
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -15750,9 +15891,9 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: v_mov_b32_e32 v3, v23
; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v20
; GFX11-NEXT: v_lshl_or_b32 v9, v38, 16, v9
-; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v12, v35, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v13, v34, 16, v13
; GFX11-NEXT: v_lshl_or_b32 v14, v33, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v31, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v17, v30, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v18, v29, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v20, v27, 16, v2
@@ -15782,7 +15923,9 @@ define inreg <44 x half> @bitcast_v22f32_to_v44f16_scalar(<22 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr28
; GFX11-NEXT: ; implicit-def: $vgpr27
; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
+; GFX11-NEXT: s_branch .LBB33_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16947,6 +17090,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v26, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
@@ -17285,7 +17429,9 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v44f16_to_v22f32_scalar:
; VI: ; %bb.0:
@@ -17305,6 +17451,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -17313,7 +17460,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB35_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -17477,19 +17624,13 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v44f16_to_v22f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -17505,6 +17646,15 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -17513,7 +17663,6 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -17528,6 +17677,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB35_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -17606,7 +17756,9 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v22f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -17624,9 +17776,9 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -17638,15 +17790,14 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -17658,10 +17809,11 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -17677,8 +17829,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -17710,7 +17861,9 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-TRUE16-NEXT: s_branch .LBB35_3
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v22f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -17724,9 +17877,9 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -17738,15 +17891,14 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -17758,10 +17910,11 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -17777,8 +17930,7 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -17810,7 +17962,9 @@ define inreg <22 x float> @bitcast_v44f16_to_v22f32_scalar(<44 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB35_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-FAKE16-NEXT: s_branch .LBB35_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17998,6 +18152,7 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v11, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -18018,13 +18173,16 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -18047,16 +18205,15 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
; SI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v11i64_to_v11f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v11, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -18077,13 +18234,16 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -18106,16 +18266,15 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v19, vcc, 0, v19, vcc
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
; VI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v11i64_to_v11f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v11, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -18136,13 +18295,16 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -18165,38 +18327,36 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v19, vcc, 0, v19, vcc
; GFX9-NEXT: v_add_co_u32_e32 v20, vcc, 3, v20
; GFX9-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v21, vcc
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v11i64_to_v11f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB37_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: .LBB37_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -18225,6 +18385,7 @@ define inreg <11 x double> @bitcast_v11i64_to_v11f64_scalar(<11 x i64> inreg %a,
; GFX11-NEXT: v_add_co_u32 v20, vcc_lo, v20, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18363,6 +18524,7 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v10, v8
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, v7
; SI-NEXT: v_mov_b32_e32 v20, v6
; SI-NEXT: v_mov_b32_e32 v19, v5
@@ -18383,13 +18545,16 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -18401,16 +18566,15 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; SI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v11f64_to_v11i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v10, v8
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, v7
; VI-NEXT: v_mov_b32_e32 v20, v6
; VI-NEXT: v_mov_b32_e32 v19, v5
@@ -18431,13 +18595,16 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -18449,16 +18616,15 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; VI-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v11f64_to_v11i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_mov_b32_e32 v10, v8
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, v7
; GFX9-NEXT: v_mov_b32_e32 v20, v6
; GFX9-NEXT: v_mov_b32_e32 v19, v5
@@ -18479,13 +18645,16 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -18497,38 +18666,36 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v11f64_to_v11i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
+; GFX11-NEXT: v_dual_mov_b32 v15, v4 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB39_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: .LBB39_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -18540,6 +18707,7 @@ define inreg <11 x i64> @bitcast_v11f64_to_v11i64_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[16:17], v[16:17], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19266,6 +19434,7 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s13, v1
; SI-NEXT: v_readfirstlane_b32 s12, v2
; SI-NEXT: v_readfirstlane_b32 s11, v3
@@ -19273,8 +19442,8 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v5
; SI-NEXT: v_readfirstlane_b32 s8, v6
; SI-NEXT: v_readfirstlane_b32 s7, v7
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v8
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -19524,25 +19693,28 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr15
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v11i64_to_v44i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s13, v0
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: v_readfirstlane_b32 s11, v2
; VI-NEXT: v_readfirstlane_b32 s10, v3
; VI-NEXT: v_readfirstlane_b32 s9, v4
; VI-NEXT: v_readfirstlane_b32 s8, v5
-; VI-NEXT: v_readfirstlane_b32 s6, v6
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v7
+; VI-NEXT: v_readfirstlane_b32 s7, v6
+; VI-NEXT: v_readfirstlane_b32 s6, v7
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -19565,8 +19737,8 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: s_lshr_b32 s75, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -19587,8 +19759,8 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -19669,13 +19841,13 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: s_or_b32 s9, s9, s28
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s40, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s15, s15, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_or_b32 s6, s6, s15
-; VI-NEXT: s_or_b32 s7, s7, s14
+; VI-NEXT: s_or_b32 s7, s7, s15
+; VI-NEXT: s_or_b32 s6, s6, s14
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -19696,8 +19868,8 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v17, s10
; VI-NEXT: v_mov_b32_e32 v18, s9
; VI-NEXT: v_mov_b32_e32 v19, s8
-; VI-NEXT: v_mov_b32_e32 v20, s6
-; VI-NEXT: v_mov_b32_e32 v21, s7
+; VI-NEXT: v_mov_b32_e32 v20, s7
+; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_4:
; VI-NEXT: ; implicit-def: $sgpr75
@@ -19722,31 +19894,34 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr40
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v11i64_to_v44i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s6, v7
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -19763,14 +19938,14 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -19785,14 +19960,14 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -19822,14 +19997,14 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s56
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s47
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s14
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -19844,14 +20019,14 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: ; implicit-def: $sgpr75
@@ -19876,23 +20051,25 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr40
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v11i64_to_v44i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-NEXT: v_readfirstlane_b32 s6, v1
; GFX11-NEXT: v_readfirstlane_b32 s7, v2
-; GFX11-NEXT: v_readfirstlane_b32 s6, v3
-; GFX11-NEXT: s_mov_b32 s62, 0
+; GFX11-NEXT: v_readfirstlane_b32 s5, v3
+; GFX11-NEXT: s_mov_b32 s62, -1
; GFX11-NEXT: s_and_b32 s8, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -19912,13 +20089,12 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s59, s2, 16
; GFX11-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-NEXT: s_lshr_b32 s61, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s7, s7, 3
-; GFX11-NEXT: s_addc_u32 s6, s6, 0
-; GFX11-NEXT: s_add_u32 s4, s4, 3
; GFX11-NEXT: s_addc_u32 s5, s5, 0
+; GFX11-NEXT: s_add_u32 s4, s4, 3
+; GFX11-NEXT: s_addc_u32 s6, s6, 0
; GFX11-NEXT: s_add_u32 s28, s28, 3
; GFX11-NEXT: s_addc_u32 s29, s29, 0
; GFX11-NEXT: s_add_u32 s26, s26, 3
@@ -19937,9 +20113,9 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -19980,9 +20156,9 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s28, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s29, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s8
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -19992,8 +20168,8 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s14
; GFX11-NEXT: v_dual_mov_b32 v16, s13 :: v_dual_mov_b32 v17, s12
-; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
+; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s6
+; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s5
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: ; implicit-def: $sgpr61
@@ -20018,7 +20194,9 @@ define inreg <44 x i16> @bitcast_v11i64_to_v44i16_scalar(<11 x i64> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20988,6 +21166,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -21017,7 +21196,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v52, v4
; SI-NEXT: v_mov_b32_e32 v53, v2
; SI-NEXT: v_mov_b32_e32 v54, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v3
@@ -21217,7 +21396,9 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB43_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v44i16_to_v11i64_scalar:
; VI: ; %bb.0:
@@ -21237,6 +21418,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -21245,7 +21427,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -21395,21 +21577,22 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -21424,7 +21607,6 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -21444,19 +21626,13 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v44i16_to_v11i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -21472,6 +21648,15 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -21480,7 +21665,6 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -21495,6 +21679,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -21530,23 +21715,27 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v53, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; GFX9-NEXT: v_lshl_or_b32 v0, v52, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v38
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v1, v54, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v53, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v52, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v51, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v50, 16, v19
+; GFX9-NEXT: v_lshl_or_b32 v0, v50, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v20, v49, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v48, 16, v21
-; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -21561,17 +21750,15 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB43_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -21589,9 +21776,9 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -21603,15 +21790,14 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -21623,10 +21809,11 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -21642,8 +21829,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -21675,7 +21861,9 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-TRUE16-NEXT: s_branch .LBB43_3
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -21689,9 +21877,9 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -21703,15 +21891,14 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -21723,10 +21910,11 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -21742,8 +21930,7 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -21775,7 +21962,9 @@ define inreg <11 x i64> @bitcast_v44i16_to_v11i64_scalar(<44 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB43_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB43_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-FAKE16-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -22707,24 +22896,25 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: v_readfirstlane_b32 s13, v2
; SI-NEXT: v_readfirstlane_b32 s10, v3
; SI-NEXT: v_readfirstlane_b32 s11, v4
-; SI-NEXT: v_readfirstlane_b32 s7, v5
-; SI-NEXT: v_readfirstlane_b32 s8, v6
+; SI-NEXT: v_readfirstlane_b32 s8, v5
+; SI-NEXT: v_readfirstlane_b32 s9, v6
; SI-NEXT: v_readfirstlane_b32 s6, v7
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v8
+; SI-NEXT: v_readfirstlane_b32 s7, v8
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v4, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v6, s4
; SI-NEXT: s_lshr_b32 s4, s11, 16
; SI-NEXT: v_cvt_f32_f16_e32 v8, s4
@@ -22762,10 +22952,10 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; SI-NEXT: v_cvt_f32_f16_e32 v50, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v52, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s7
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s13
@@ -22822,18 +23012,18 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; SI-NEXT: s_addc_u32 s11, s11, 0
; SI-NEXT: s_lshr_b32 s60, s10, 16
; SI-NEXT: s_lshr_b32 s61, s11, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s62, s7, 16
-; SI-NEXT: s_lshr_b32 s63, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: s_lshr_b32 s62, s8, 16
+; SI-NEXT: s_lshr_b32 s63, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: s_lshr_b32 s72, s6, 16
-; SI-NEXT: s_lshr_b32 s73, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s9
+; SI-NEXT: s_lshr_b32 s73, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s7
; SI-NEXT: v_cvt_f32_f16_e32 v5, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v7, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s13
@@ -23074,25 +23264,28 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v11i64_to_v44f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s13, v0
; VI-NEXT: v_readfirstlane_b32 s12, v1
; VI-NEXT: v_readfirstlane_b32 s11, v2
; VI-NEXT: v_readfirstlane_b32 s10, v3
; VI-NEXT: v_readfirstlane_b32 s9, v4
; VI-NEXT: v_readfirstlane_b32 s8, v5
-; VI-NEXT: v_readfirstlane_b32 s6, v6
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v7
+; VI-NEXT: v_readfirstlane_b32 s7, v6
+; VI-NEXT: v_readfirstlane_b32 s6, v7
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -23115,8 +23308,8 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; VI-NEXT: s_lshr_b32 s75, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -23137,8 +23330,8 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s14, s7, 16
-; VI-NEXT: s_lshr_b32 s15, s6, 16
+; VI-NEXT: s_lshr_b32 s14, s6, 16
+; VI-NEXT: s_lshr_b32 s15, s7, 16
; VI-NEXT: s_lshr_b32 s40, s8, 16
; VI-NEXT: s_lshr_b32 s41, s9, 16
; VI-NEXT: s_lshr_b32 s42, s10, 16
@@ -23219,13 +23412,13 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; VI-NEXT: s_or_b32 s9, s9, s28
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s40, 16
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_and_b32 s7, 0xffff, s7
+; VI-NEXT: s_lshl_b32 s15, s15, 16
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_or_b32 s6, s6, s15
-; VI-NEXT: s_or_b32 s7, s7, s14
+; VI-NEXT: s_or_b32 s7, s7, s15
+; VI-NEXT: s_or_b32 s6, s6, s14
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -23246,8 +23439,8 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v17, s10
; VI-NEXT: v_mov_b32_e32 v18, s9
; VI-NEXT: v_mov_b32_e32 v19, s8
-; VI-NEXT: v_mov_b32_e32 v20, s6
-; VI-NEXT: v_mov_b32_e32 v21, s7
+; VI-NEXT: v_mov_b32_e32 v20, s7
+; VI-NEXT: v_mov_b32_e32 v21, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_4:
; VI-NEXT: ; implicit-def: $sgpr75
@@ -23272,31 +23465,34 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr40
; VI-NEXT: ; implicit-def: $sgpr15
; VI-NEXT: ; implicit-def: $sgpr14
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v11i64_to_v44f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s6, v7
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -23313,14 +23509,14 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s75, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -23335,14 +23531,14 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s14, s13, 16
-; GFX9-NEXT: s_lshr_b32 s15, s12, 16
-; GFX9-NEXT: s_lshr_b32 s40, s11, 16
-; GFX9-NEXT: s_lshr_b32 s41, s10, 16
-; GFX9-NEXT: s_lshr_b32 s42, s9, 16
-; GFX9-NEXT: s_lshr_b32 s43, s8, 16
-; GFX9-NEXT: s_lshr_b32 s44, s7, 16
-; GFX9-NEXT: s_lshr_b32 s45, s6, 16
+; GFX9-NEXT: s_lshr_b32 s14, s6, 16
+; GFX9-NEXT: s_lshr_b32 s15, s13, 16
+; GFX9-NEXT: s_lshr_b32 s40, s12, 16
+; GFX9-NEXT: s_lshr_b32 s41, s11, 16
+; GFX9-NEXT: s_lshr_b32 s42, s10, 16
+; GFX9-NEXT: s_lshr_b32 s43, s9, 16
+; GFX9-NEXT: s_lshr_b32 s44, s8, 16
+; GFX9-NEXT: s_lshr_b32 s45, s7, 16
; GFX9-NEXT: s_lshr_b32 s46, s29, 16
; GFX9-NEXT: s_lshr_b32 s47, s28, 16
; GFX9-NEXT: s_lshr_b32 s56, s27, 16
@@ -23372,14 +23568,14 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s56
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s47
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s40
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s15
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s14
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s15
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s14
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -23394,14 +23590,14 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: ; implicit-def: $sgpr75
@@ -23426,23 +23622,25 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr40
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr14
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v11i64_to_v44f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v4
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
-; GFX11-NEXT: v_readfirstlane_b32 s5, v1
+; GFX11-NEXT: v_readfirstlane_b32 s6, v1
; GFX11-NEXT: v_readfirstlane_b32 s7, v2
-; GFX11-NEXT: v_readfirstlane_b32 s6, v3
-; GFX11-NEXT: s_mov_b32 s62, 0
+; GFX11-NEXT: v_readfirstlane_b32 s5, v3
+; GFX11-NEXT: s_mov_b32 s62, -1
; GFX11-NEXT: s_and_b32 s8, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -23462,13 +23660,12 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s59, s2, 16
; GFX11-NEXT: s_lshr_b32 s60, s1, 16
; GFX11-NEXT: s_lshr_b32 s61, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s7, s7, 3
-; GFX11-NEXT: s_addc_u32 s6, s6, 0
-; GFX11-NEXT: s_add_u32 s4, s4, 3
; GFX11-NEXT: s_addc_u32 s5, s5, 0
+; GFX11-NEXT: s_add_u32 s4, s4, 3
+; GFX11-NEXT: s_addc_u32 s6, s6, 0
; GFX11-NEXT: s_add_u32 s28, s28, 3
; GFX11-NEXT: s_addc_u32 s29, s29, 0
; GFX11-NEXT: s_add_u32 s26, s26, 3
@@ -23487,9 +23684,9 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s8, s6, 16
+; GFX11-NEXT: s_lshr_b32 s8, s5, 16
; GFX11-NEXT: s_lshr_b32 s9, s7, 16
-; GFX11-NEXT: s_lshr_b32 s10, s5, 16
+; GFX11-NEXT: s_lshr_b32 s10, s6, 16
; GFX11-NEXT: s_lshr_b32 s11, s4, 16
; GFX11-NEXT: s_lshr_b32 s12, s29, 16
; GFX11-NEXT: s_lshr_b32 s13, s28, 16
@@ -23530,9 +23727,9 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s28, s13
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s29, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s10
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s9
-; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s8
+; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s8
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -23542,8 +23739,8 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v13, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s14
; GFX11-NEXT: v_dual_mov_b32 v16, s13 :: v_dual_mov_b32 v17, s12
-; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s6
+; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s6
+; GFX11-NEXT: v_dual_mov_b32 v20, s7 :: v_dual_mov_b32 v21, s5
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: ; implicit-def: $sgpr61
@@ -23568,7 +23765,9 @@ define inreg <44 x half> @bitcast_v11i64_to_v44f16_scalar(<11 x i64> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s62
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24733,6 +24932,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v26, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
@@ -25071,7 +25271,9 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v44f16_to_v11i64_scalar:
; VI: ; %bb.0:
@@ -25091,6 +25293,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -25099,7 +25302,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -25263,19 +25466,13 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v44f16_to_v11i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -25291,6 +25488,15 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -25299,7 +25505,6 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -25314,6 +25519,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB47_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -25392,7 +25598,9 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
+; GFX9-NEXT: s_branch .LBB47_3
;
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -25410,9 +25618,9 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -25424,15 +25632,14 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -25444,10 +25651,11 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -25463,8 +25671,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -25496,7 +25703,9 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-TRUE16-NEXT: s_branch .LBB47_3
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -25510,9 +25719,9 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -25524,15 +25733,14 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -25544,10 +25752,11 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -25563,8 +25772,7 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -25596,7 +25804,9 @@ define inreg <11 x i64> @bitcast_v44f16_to_v11i64_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-FAKE16-NEXT: s_branch .LBB47_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -26255,6 +26465,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v21, s16
; SI-NEXT: v_mov_b32_e32 v22, s17
; SI-NEXT: v_mov_b32_e32 v19, s18
@@ -26267,9 +26478,9 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v14, s25
; SI-NEXT: v_mov_b32_e32 v11, s26
; SI-NEXT: v_mov_b32_e32 v12, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v9, s28
; SI-NEXT: v_mov_b32_e32 v10, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_alignbit_b32 v23, v8, v7, 16
@@ -26485,12 +26696,15 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr30
; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr28
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v11f64_to_v44i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, s16
; VI-NEXT: v_mov_b32_e32 v23, s17
; VI-NEXT: v_mov_b32_e32 v20, s18
@@ -26503,9 +26717,9 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v10, s25
; VI-NEXT: v_mov_b32_e32 v16, s26
; VI-NEXT: v_mov_b32_e32 v17, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -26642,12 +26856,15 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr32
; VI-NEXT: ; implicit-def: $vgpr31
; VI-NEXT: ; implicit-def: $vgpr30
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v11f64_to_v44i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, s16
; GFX9-NEXT: v_mov_b32_e32 v23, s17
; GFX9-NEXT: v_mov_b32_e32 v20, s18
@@ -26660,9 +26877,9 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v10, s25
; GFX9-NEXT: v_mov_b32_e32 v16, s26
; GFX9-NEXT: v_mov_b32_e32 v17, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -26799,7 +27016,9 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr32
; GFX9-NEXT: ; implicit-def: $vgpr31
; GFX9-NEXT: ; implicit-def: $vgpr30
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
+; GFX9-NEXT: s_branch .LBB49_3
;
; GFX11-LABEL: bitcast_v11f64_to_v44i16_scalar:
; GFX11: ; %bb.0:
@@ -26814,8 +27033,8 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
@@ -26840,8 +27059,7 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-NEXT: s_cbranch_execnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
@@ -26948,7 +27166,9 @@ define inreg <44 x i16> @bitcast_v11f64_to_v44i16_scalar(<11 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr28
; GFX11-NEXT: ; implicit-def: $vgpr27
; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
+; GFX11-NEXT: s_branch .LBB49_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27918,6 +28138,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -27947,7 +28168,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v52, v4
; SI-NEXT: v_mov_b32_e32 v53, v2
; SI-NEXT: v_mov_b32_e32 v54, v0
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v60, 16, v3
@@ -28147,7 +28368,9 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB51_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v44i16_to_v11f64_scalar:
; VI: ; %bb.0:
@@ -28167,6 +28390,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -28175,7 +28399,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -28325,21 +28549,22 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -28354,7 +28579,6 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -28374,19 +28598,13 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v44i16_to_v11f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -28402,6 +28620,15 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -28410,7 +28637,6 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -28425,6 +28651,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -28460,23 +28687,27 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v37
+; GFX9-NEXT: v_lshl_or_b32 v0, v53, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v36
+; GFX9-NEXT: v_lshl_or_b32 v0, v52, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v35
+; GFX9-NEXT: v_lshl_or_b32 v0, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v38
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v37
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v36
-; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v35
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v34
+; GFX9-NEXT: v_pk_add_u16 v18, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v0, v55, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v1, v54, 16, v1
-; GFX9-NEXT: v_lshl_or_b32 v16, v53, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v52, 16, v17
-; GFX9-NEXT: v_lshl_or_b32 v18, v51, 16, v18
-; GFX9-NEXT: v_lshl_or_b32 v19, v50, 16, v19
+; GFX9-NEXT: v_lshl_or_b32 v0, v50, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v20, v49, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v48, 16, v21
-; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v19, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -28491,17 +28722,15 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v21, v21, 3 op_sel_hi:[1,0]
; GFX9-NEXT: .LBB51_3: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-TRUE16-LABEL: bitcast_v44i16_to_v11f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -28519,9 +28748,9 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -28533,15 +28762,14 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -28553,10 +28781,11 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -28572,8 +28801,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -28605,7 +28833,9 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-TRUE16-NEXT: s_branch .LBB51_3
;
; GFX11-FAKE16-LABEL: bitcast_v44i16_to_v11f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -28619,9 +28849,9 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -28633,15 +28863,14 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -28653,10 +28882,11 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -28672,8 +28902,7 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -28705,7 +28934,9 @@ define inreg <11 x double> @bitcast_v44i16_to_v11f64_scalar(<44 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-FAKE16-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29559,6 +29790,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v9
+; SI-NEXT: s_and_b64 s[12:13], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s10, v1
; SI-NEXT: v_readfirstlane_b32 s11, v2
; SI-NEXT: v_readfirstlane_b32 s8, v3
@@ -29566,8 +29798,8 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v5
; SI-NEXT: v_readfirstlane_b32 s7, v6
; SI-NEXT: v_readfirstlane_b32 s4, v7
-; SI-NEXT: s_and_b64 s[12:13], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v8
+; SI-NEXT: s_mov_b64 s[12:13], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -29940,12 +30172,15 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v11f64_to_v44f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v22, s16
; VI-NEXT: v_mov_b32_e32 v23, s17
; VI-NEXT: v_mov_b32_e32 v20, s18
@@ -29958,9 +30193,9 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v10, s25
; VI-NEXT: v_mov_b32_e32 v16, s26
; VI-NEXT: v_mov_b32_e32 v17, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB53_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -30097,12 +30332,15 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; VI-NEXT: ; implicit-def: $vgpr32
; VI-NEXT: ; implicit-def: $vgpr31
; VI-NEXT: ; implicit-def: $vgpr30
-; VI-NEXT: s_branch .LBB53_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB53_2
+; VI-NEXT: s_branch .LBB53_3
;
; GFX9-LABEL: bitcast_v11f64_to_v44f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v22, s16
; GFX9-NEXT: v_mov_b32_e32 v23, s17
; GFX9-NEXT: v_mov_b32_e32 v20, s18
@@ -30115,9 +30353,9 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v10, s25
; GFX9-NEXT: v_mov_b32_e32 v16, s26
; GFX9-NEXT: v_mov_b32_e32 v17, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v7
@@ -30254,7 +30492,9 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr32
; GFX9-NEXT: ; implicit-def: $vgpr31
; GFX9-NEXT: ; implicit-def: $vgpr30
-; GFX9-NEXT: s_branch .LBB53_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB53_2
+; GFX9-NEXT: s_branch .LBB53_3
;
; GFX11-LABEL: bitcast_v11f64_to_v44f16_scalar:
; GFX11: ; %bb.0:
@@ -30269,8 +30509,8 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v15, s25
; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v13, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v3
@@ -30295,8 +30535,7 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v20
; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
; GFX11-NEXT: v_lshrrev_b32_e32 v24, 16, v22
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-NEXT: s_cbranch_execnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
@@ -30403,7 +30642,9 @@ define inreg <44 x half> @bitcast_v11f64_to_v44f16_scalar(<11 x double> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr28
; GFX11-NEXT: ; implicit-def: $vgpr27
; GFX11-NEXT: ; implicit-def: $vgpr26
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB53_2
+; GFX11-NEXT: s_branch .LBB53_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31568,6 +31809,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v26, s28
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v30
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:68 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:72 ; 4-byte Folded Spill
@@ -31906,7 +32148,9 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:84 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:80 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v44f16_to_v11f64_scalar:
; VI: ; %bb.0:
@@ -31926,6 +32170,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v7
; VI-NEXT: v_mov_b32_e32 v33, v6
; VI-NEXT: v_mov_b32_e32 v34, v5
@@ -31934,7 +32179,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v37, v2
; VI-NEXT: v_mov_b32_e32 v38, v1
; VI-NEXT: v_mov_b32_e32 v39, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB55_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -32098,19 +32343,13 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB55_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB55_2
+; VI-NEXT: s_branch .LBB55_3
;
; GFX9-LABEL: bitcast_v44f16_to_v11f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v7
-; GFX9-NEXT: v_mov_b32_e32 v33, v6
-; GFX9-NEXT: v_mov_b32_e32 v34, v5
-; GFX9-NEXT: v_mov_b32_e32 v35, v4
-; GFX9-NEXT: v_mov_b32_e32 v36, v3
-; GFX9-NEXT: v_mov_b32_e32 v37, v2
-; GFX9-NEXT: v_mov_b32_e32 v38, v1
-; GFX9-NEXT: v_mov_b32_e32 v39, v0
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
; GFX9-NEXT: s_lshr_b32 s42, s27, 16
@@ -32126,6 +32365,15 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: v_mov_b32_e32 v32, v7
+; GFX9-NEXT: v_mov_b32_e32 v33, v6
+; GFX9-NEXT: v_mov_b32_e32 v34, v5
+; GFX9-NEXT: v_mov_b32_e32 v35, v4
+; GFX9-NEXT: v_mov_b32_e32 v36, v3
+; GFX9-NEXT: v_mov_b32_e32 v37, v2
+; GFX9-NEXT: v_mov_b32_e32 v38, v1
+; GFX9-NEXT: v_mov_b32_e32 v39, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v32
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v34
@@ -32134,7 +32382,6 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -32149,6 +32396,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
@@ -32227,7 +32475,9 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB55_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB55_2
+; GFX9-NEXT: s_branch .LBB55_3
;
; GFX11-TRUE16-LABEL: bitcast_v44f16_to_v11f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -32245,9 +32495,9 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -32259,15 +32509,14 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -32279,10 +32528,11 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -32298,8 +32548,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -32331,7 +32580,9 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB55_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-TRUE16-NEXT: s_branch .LBB55_3
;
; GFX11-FAKE16-LABEL: bitcast_v44f16_to_v11f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -32345,9 +32596,9 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v1
; GFX11-FAKE16-NEXT: v_and_b32_e32 v37, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v36, 0xffff, v3
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
@@ -32359,15 +32610,14 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
@@ -32379,10 +32629,11 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
@@ -32398,8 +32649,7 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s16
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s0
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-FAKE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v35, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v34, 16, v38
@@ -32431,7 +32681,9 @@ define inreg <11 x double> @bitcast_v44f16_to_v11f64_scalar(<44 x half> inreg %a
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB55_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB55_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-FAKE16-NEXT: s_branch .LBB55_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -33513,6 +33765,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v31, v3
@@ -33889,8 +34142,6 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; SI-NEXT: .LBB57_4:
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
-; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr35
; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr36
@@ -33942,7 +34193,11 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: ; kill: killed $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v44i16_to_v44f16_scalar:
; VI: ; %bb.0:
@@ -33962,6 +34217,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v9, 16, v7
; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v6
; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v5
@@ -33969,12 +34225,15 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_lshrrev_b32_e32 v8, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v0
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_add_i32 s43, s43, 3
; VI-NEXT: s_add_i32 s17, s17, 3
@@ -34004,7 +34263,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: s_add_i32 s29, s29, 3
; VI-NEXT: s_add_i32 s6, s6, 3
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: v_add_u32_e32 v8, vcc, 3, v8
+; VI-NEXT: v_add_u32_e32 v14, vcc, 3, v14
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -34019,7 +34278,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: v_add_u32_e32 v10, vcc, 3, v10
; VI-NEXT: v_add_u32_e32 v7, vcc, 3, v7
; VI-NEXT: v_add_u32_e32 v9, vcc, 3, v9
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -34043,7 +34302,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: s_or_b32 s13, s18, s13
; VI-NEXT: s_and_b32 s18, 0xffff, s23
; VI-NEXT: s_lshl_b32 s12, s12, 16
-; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
+; VI-NEXT: v_lshlrev_b32_e32 v8, 16, v14
; VI-NEXT: s_or_b32 s12, s18, s12
; VI-NEXT: s_and_b32 s18, 0xffff, s24
; VI-NEXT: s_lshl_b32 s11, s11, 16
@@ -34093,8 +34352,6 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s7
; VI-NEXT: v_mov_b32_e32 v13, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v44i16_to_v44f16_scalar:
; GFX9: ; %bb.0:
@@ -34114,6 +34371,7 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
@@ -34121,12 +34379,15 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s29, s43
; GFX9-NEXT: v_pk_add_u16 v13, s4, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s28, s42
@@ -34202,8 +34463,6 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; GFX9-NEXT: s_branch .LBB57_5
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -34318,13 +34577,16 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -34396,8 +34658,6 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
-; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
@@ -34487,19 +34747,22 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -34520,10 +34783,10 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, v3, 3 op_sel_hi:[1,0]
@@ -34542,12 +34805,12 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s8, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, s0, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v23, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v22, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v25
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v24
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v23
@@ -34571,8 +34834,6 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
; GFX11-FAKE16-NEXT: s_branch .LBB57_5
-; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
@@ -34589,8 +34850,8 @@ define inreg <44 x half> @bitcast_v44i16_to_v44f16_scalar(<44 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s15 :: v_dual_mov_b32 v33, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s13 :: v_dual_mov_b32 v35, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s11 :: v_dual_mov_b32 v37, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s9 :: v_dual_mov_b32 v39, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s6 :: v_dual_mov_b32 v49, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s8 :: v_dual_mov_b32 v39, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s9 :: v_dual_mov_b32 v49, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s5
; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
@@ -35560,13 +35821,17 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v19, s27
; SI-NEXT: v_cvt_f16_f32_e32 v20, s28
; SI-NEXT: v_cvt_f16_f32_e32 v28, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v53
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v1, v1
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
@@ -35754,7 +36019,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v17, v6, v17, 16
; SI-NEXT: v_alignbit_b32 v16, v3, v16, 16
; SI-NEXT: v_alignbit_b32 v12, v1, v12, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27
; SI-NEXT: v_lshlrev_b32_e32 v52, 16, v52
; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24
@@ -35879,8 +36144,6 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v1, v0, s[0:3], 0 offen
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v44f16_to_v44i16_scalar:
; VI: ; %bb.0:
@@ -35900,6 +36163,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; VI-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v5
@@ -35907,12 +36171,15 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v30, 0x200
; VI-NEXT: v_add_f16_e32 v24, s16, v30
; VI-NEXT: v_add_f16_e32 v51, s43, v30
@@ -35959,8 +36226,6 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v7, 0x200, v7
; VI-NEXT: v_add_f16_e32 v21, 0x200, v21
; VI-NEXT: s_branch .LBB59_5
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v30, s6
; VI-NEXT: v_mov_b32_e32 v13, s29
@@ -36063,6 +36328,7 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v8
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
@@ -36070,12 +36336,15 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX9-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX9-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -36153,8 +36422,6 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; GFX9-NEXT: s_branch .LBB59_5
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -36269,13 +36536,16 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-TRUE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -36347,8 +36617,6 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
-; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
@@ -36438,19 +36706,22 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v2, 0xffff, v2
; GFX11-FAKE16-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -36471,10 +36742,10 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, v3 op_sel_hi:[0,1]
@@ -36493,12 +36764,12 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s8 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, s0 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v23, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v22, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v51, 16, v25
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v50, 16, v24
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v49, 16, v23
@@ -36522,8 +36793,6 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v20, 16, v2
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v21, 16, v3
; GFX11-FAKE16-NEXT: s_branch .LBB59_5
-; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s29 :: v_dual_mov_b32 v12, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s27 :: v_dual_mov_b32 v14, s26
@@ -36540,8 +36809,8 @@ define inreg <44 x i16> @bitcast_v44f16_to_v44i16_scalar(<44 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v32, s15 :: v_dual_mov_b32 v33, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v34, s13 :: v_dual_mov_b32 v35, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s11 :: v_dual_mov_b32 v37, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s9 :: v_dual_mov_b32 v39, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s6 :: v_dual_mov_b32 v49, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s8 :: v_dual_mov_b32 v39, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s9 :: v_dual_mov_b32 v49, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s4 :: v_dual_mov_b32 v51, s5
; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v25, 0xffff, v25
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
index 152a48b..5e99a95 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.768bit.ll
@@ -177,6 +177,7 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -197,15 +198,18 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21
@@ -230,15 +234,14 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v24i32_to_v24f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -259,15 +262,18 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v23, vcc, 3, v23
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
; VI-NEXT: v_add_u32_e32 v21, vcc, 3, v21
@@ -292,15 +298,14 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v24i32_to_v24f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -321,15 +326,18 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
@@ -354,39 +362,37 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v24i32_to_v24f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
@@ -411,6 +417,7 @@ define inreg <24 x float> @bitcast_v24i32_to_v24f32_scalar(<24 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -588,6 +595,7 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -608,15 +616,18 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v23, 1.0, v23
; SI-NEXT: v_add_f32_e32 v22, 1.0, v22
; SI-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -641,15 +652,14 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v24f32_to_v24i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -670,15 +680,18 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -703,15 +716,14 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v24f32_to_v24i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -732,15 +744,18 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -765,39 +780,37 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v24f32_to_v24i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB3_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: .LBB3_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
@@ -810,6 +823,7 @@ define inreg <24 x i32> @bitcast_v24f32_to_v24i32_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -999,6 +1013,7 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -1019,15 +1034,18 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21
@@ -1052,15 +1070,14 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v24i32_to_v12i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -1081,15 +1098,18 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v23, vcc, 3, v23
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
; VI-NEXT: v_add_u32_e32 v21, vcc, 3, v21
@@ -1114,15 +1134,14 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v24i32_to_v12i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -1143,15 +1162,18 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
@@ -1176,39 +1198,37 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v24i32_to_v12i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
@@ -1233,6 +1253,7 @@ define inreg <12 x i64> @bitcast_v24i32_to_v12i64_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1428,6 +1449,7 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -1448,15 +1470,18 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
; SI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
@@ -1481,15 +1506,14 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v12i64_to_v24i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -1510,15 +1534,18 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
; VI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
@@ -1543,15 +1570,14 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v12i64_to_v24i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -1572,15 +1598,18 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 3, v22
; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v23, vcc
; GFX9-NEXT: v_add_co_u32_e32 v20, vcc, 3, v20
@@ -1605,39 +1634,37 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v12i64_to_v24i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB7_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: .LBB7_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
@@ -1668,6 +1695,7 @@ define inreg <24 x i32> @bitcast_v12i64_to_v24i32_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1857,6 +1885,7 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -1877,15 +1906,18 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
; SI-NEXT: v_add_i32_e32 v21, vcc, 3, v21
@@ -1910,15 +1942,14 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v24i32_to_v12f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -1939,15 +1970,18 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v23, vcc, 3, v23
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
; VI-NEXT: v_add_u32_e32 v21, vcc, 3, v21
@@ -1972,15 +2006,14 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v24i32_to_v12f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -2001,15 +2034,18 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
; GFX9-NEXT: v_add_u32_e32 v22, 3, v22
; GFX9-NEXT: v_add_u32_e32 v21, 3, v21
@@ -2034,39 +2070,37 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v24i32_to_v12f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
; GFX11-NEXT: v_add_nc_u32_e32 v22, 3, v22
; GFX11-NEXT: v_add_nc_u32_e32 v21, 3, v21
@@ -2091,6 +2125,7 @@ define inreg <12 x double> @bitcast_v24i32_to_v12f64_scalar(<24 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2232,6 +2267,7 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -2254,13 +2290,16 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -2273,15 +2312,14 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v12f64_to_v24i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -2304,13 +2342,16 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -2323,15 +2364,14 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v12f64_to_v24i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -2354,13 +2394,16 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -2373,39 +2416,37 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v12f64_to_v24i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB11_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: .LBB11_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -2418,6 +2459,7 @@ define inreg <24 x i32> @bitcast_v12f64_to_v24i32_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3190,6 +3232,7 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s15, v1
; SI-NEXT: v_readfirstlane_b32 s14, v2
; SI-NEXT: v_readfirstlane_b32 s13, v3
@@ -3199,8 +3242,8 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v7
; SI-NEXT: v_readfirstlane_b32 s8, v8
; SI-NEXT: v_readfirstlane_b32 s7, v9
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -3472,12 +3515,15 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr41
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v24i32_to_v48i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s15, v0
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: v_readfirstlane_b32 s13, v2
@@ -3486,13 +3532,13 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v5
; VI-NEXT: v_readfirstlane_b32 s9, v6
; VI-NEXT: v_readfirstlane_b32 s8, v7
-; VI-NEXT: v_readfirstlane_b32 s6, v8
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v9
+; VI-NEXT: v_readfirstlane_b32 s7, v8
+; VI-NEXT: v_readfirstlane_b32 s6, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -3517,8 +3563,8 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: s_lshr_b32 s79, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -3541,8 +3587,8 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -3632,12 +3678,12 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s42, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s28, s41, 16
-; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_lshl_b32 s28, s41, 16
; VI-NEXT: s_or_b32 s7, s7, s28
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -3660,8 +3706,8 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v19, s10
; VI-NEXT: v_mov_b32_e32 v20, s9
; VI-NEXT: v_mov_b32_e32 v21, s8
-; VI-NEXT: v_mov_b32_e32 v22, s6
-; VI-NEXT: v_mov_b32_e32 v23, s7
+; VI-NEXT: v_mov_b32_e32 v22, s7
+; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_4:
; VI-NEXT: ; implicit-def: $sgpr79
@@ -3688,35 +3734,38 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr42
; VI-NEXT: ; implicit-def: $sgpr41
; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v24i32_to_v48i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s6, v9
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -3733,6 +3782,7 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s15, s15, 3
; GFX9-NEXT: s_add_i32 s14, s14, 3
; GFX9-NEXT: s_add_i32 s13, s13, 3
@@ -3742,7 +3792,6 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -3757,16 +3806,16 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -3796,16 +3845,16 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s60
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s40
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -3820,16 +3869,16 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: ; implicit-def: $sgpr79
@@ -3856,7 +3905,9 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr42
; GFX9-NEXT: ; implicit-def: $sgpr41
; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v24i32_to_v48i16_scalar:
; GFX11: ; %bb.0:
@@ -3865,16 +3916,16 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: v_readfirstlane_b32 s5, v1
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-NEXT: v_readfirstlane_b32 s8, v3
; GFX11-NEXT: v_readfirstlane_b32 s9, v4
-; GFX11-NEXT: v_readfirstlane_b32 s8, v5
-; GFX11-NEXT: s_mov_b32 s74, 0
+; GFX11-NEXT: v_readfirstlane_b32 s7, v5
+; GFX11-NEXT: s_mov_b32 s74, -1
; GFX11-NEXT: s_and_b32 s10, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -3896,12 +3947,11 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s63, s2, 16
; GFX11-NEXT: s_lshr_b32 s72, s1, 16
; GFX11-NEXT: s_lshr_b32 s73, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s8, s8, 3
-; GFX11-NEXT: s_add_i32 s9, s9, 3
; GFX11-NEXT: s_add_i32 s7, s7, 3
+; GFX11-NEXT: s_add_i32 s9, s9, 3
+; GFX11-NEXT: s_add_i32 s8, s8, 3
; GFX11-NEXT: s_add_i32 s6, s6, 3
; GFX11-NEXT: s_add_i32 s5, s5, 3
; GFX11-NEXT: s_add_i32 s4, s4, 3
@@ -3923,9 +3973,9 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -3970,9 +4020,9 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s10
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -3983,8 +4033,8 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s8
+; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s8
+; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s7
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: ; implicit-def: $sgpr73
@@ -4011,7 +4061,9 @@ define inreg <48 x i16> @bitcast_v24i32_to_v48i16_scalar(<24 x i32> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5116,6 +5168,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v49, v10
; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1
; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v3
@@ -5137,7 +5190,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4
; SI-NEXT: s_cbranch_scc0 .LBB15_4
@@ -5399,7 +5452,9 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v27, v46
; SI-NEXT: v_mov_b32_e32 v29, v45
; SI-NEXT: v_mov_b32_e32 v34, v43
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v48i16_to_v24i32_scalar:
; VI: ; %bb.0:
@@ -5419,6 +5474,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -5429,7 +5485,7 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB15_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -5458,41 +5514,41 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -5515,97 +5571,98 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v48
+; VI-NEXT: v_lshlrev_b32_sdwa v3, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v2
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_and_b32 s4, s16, 0xffff
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_add_i32 s17, s17, 3
-; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -5620,7 +5677,6 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -5640,21 +5696,13 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v48i16_to_v24i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -5670,6 +5718,17 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -5684,7 +5743,6 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -5699,12 +5757,11 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB15_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -5718,8 +5775,10 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -5737,26 +5796,30 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB15_3
; GFX9-NEXT: .LBB15_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v49
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v48
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v39
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v38
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
+; GFX9-NEXT: v_lshl_or_b32 v0, v43, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v41, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v37
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v36
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v35
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
+; GFX9-NEXT: v_lshl_or_b32 v1, v42, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v40, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v18, v55, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v19, v54, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v20, v53, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v23
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -5771,10 +5834,6 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -5790,7 +5849,9 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -5814,44 +5875,44 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -5860,17 +5921,16 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -5879,21 +5939,21 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -5906,7 +5966,9 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-TRUE16-NEXT: s_branch .LBB15_3
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -5924,44 +5986,44 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -5970,17 +6032,16 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -5989,21 +6050,21 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -6016,7 +6077,9 @@ define inreg <24 x i32> @bitcast_v48i16_to_v24i32_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-FAKE16-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7033,27 +7096,28 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s15, v1
; SI-NEXT: v_readfirstlane_b32 s14, v2
; SI-NEXT: v_readfirstlane_b32 s13, v3
; SI-NEXT: v_readfirstlane_b32 s12, v4
; SI-NEXT: v_readfirstlane_b32 s11, v5
; SI-NEXT: v_readfirstlane_b32 s10, v6
-; SI-NEXT: v_readfirstlane_b32 s8, v7
-; SI-NEXT: v_readfirstlane_b32 s7, v8
-; SI-NEXT: v_readfirstlane_b32 s6, v9
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v10
+; SI-NEXT: v_readfirstlane_b32 s9, v7
+; SI-NEXT: v_readfirstlane_b32 s8, v8
+; SI-NEXT: v_readfirstlane_b32 s7, v9
+; SI-NEXT: v_readfirstlane_b32 s6, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -7096,10 +7160,10 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v40, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -7142,10 +7206,10 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s12, s12, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s10, s10, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s40, s18, 16
@@ -7166,14 +7230,14 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s63, s12, 16
; SI-NEXT: s_lshr_b32 s72, s11, 16
; SI-NEXT: s_lshr_b32 s73, s10, 16
-; SI-NEXT: s_lshr_b32 s74, s8, 16
-; SI-NEXT: s_lshr_b32 s75, s7, 16
-; SI-NEXT: s_lshr_b32 s76, s6, 16
-; SI-NEXT: s_lshr_b32 s77, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: s_lshr_b32 s74, s9, 16
+; SI-NEXT: s_lshr_b32 s75, s8, 16
+; SI-NEXT: s_lshr_b32 s76, s7, 16
+; SI-NEXT: s_lshr_b32 s77, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -7439,12 +7503,15 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v24i32_to_v48f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s15, v0
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: v_readfirstlane_b32 s13, v2
@@ -7453,13 +7520,13 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v5
; VI-NEXT: v_readfirstlane_b32 s9, v6
; VI-NEXT: v_readfirstlane_b32 s8, v7
-; VI-NEXT: v_readfirstlane_b32 s6, v8
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v9
+; VI-NEXT: v_readfirstlane_b32 s7, v8
+; VI-NEXT: v_readfirstlane_b32 s6, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB17_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -7484,8 +7551,8 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: s_lshr_b32 s79, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -7508,8 +7575,8 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -7599,12 +7666,12 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s42, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s28, s41, 16
-; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_lshl_b32 s28, s41, 16
; VI-NEXT: s_or_b32 s7, s7, s28
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -7627,8 +7694,8 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v19, s10
; VI-NEXT: v_mov_b32_e32 v20, s9
; VI-NEXT: v_mov_b32_e32 v21, s8
-; VI-NEXT: v_mov_b32_e32 v22, s6
-; VI-NEXT: v_mov_b32_e32 v23, s7
+; VI-NEXT: v_mov_b32_e32 v22, s7
+; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_4:
; VI-NEXT: ; implicit-def: $sgpr79
@@ -7655,35 +7722,38 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr42
; VI-NEXT: ; implicit-def: $sgpr41
; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB17_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB17_2
+; VI-NEXT: s_branch .LBB17_3
;
; GFX9-LABEL: bitcast_v24i32_to_v48f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s6, v9
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -7700,6 +7770,7 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s15, s15, 3
; GFX9-NEXT: s_add_i32 s14, s14, 3
; GFX9-NEXT: s_add_i32 s13, s13, 3
@@ -7709,7 +7780,6 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -7724,16 +7794,16 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -7763,16 +7833,16 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s60
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s40
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -7787,16 +7857,16 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: ; implicit-def: $sgpr79
@@ -7823,7 +7893,9 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr42
; GFX9-NEXT: ; implicit-def: $sgpr41
; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB17_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB17_2
+; GFX9-NEXT: s_branch .LBB17_3
;
; GFX11-LABEL: bitcast_v24i32_to_v48f16_scalar:
; GFX11: ; %bb.0:
@@ -7832,16 +7904,16 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: v_readfirstlane_b32 s5, v1
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-NEXT: v_readfirstlane_b32 s8, v3
; GFX11-NEXT: v_readfirstlane_b32 s9, v4
-; GFX11-NEXT: v_readfirstlane_b32 s8, v5
-; GFX11-NEXT: s_mov_b32 s74, 0
+; GFX11-NEXT: v_readfirstlane_b32 s7, v5
+; GFX11-NEXT: s_mov_b32 s74, -1
; GFX11-NEXT: s_and_b32 s10, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -7863,12 +7935,11 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s63, s2, 16
; GFX11-NEXT: s_lshr_b32 s72, s1, 16
; GFX11-NEXT: s_lshr_b32 s73, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
+; GFX11-NEXT: s_cbranch_execnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s8, s8, 3
-; GFX11-NEXT: s_add_i32 s9, s9, 3
; GFX11-NEXT: s_add_i32 s7, s7, 3
+; GFX11-NEXT: s_add_i32 s9, s9, 3
+; GFX11-NEXT: s_add_i32 s8, s8, 3
; GFX11-NEXT: s_add_i32 s6, s6, 3
; GFX11-NEXT: s_add_i32 s5, s5, 3
; GFX11-NEXT: s_add_i32 s4, s4, 3
@@ -7890,9 +7961,9 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -7937,9 +8008,9 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s10
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -7950,8 +8021,8 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s8
+; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s8
+; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s7
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: ; implicit-def: $sgpr73
@@ -7978,7 +8049,9 @@ define inreg <48 x half> @bitcast_v24i32_to_v48f16_scalar(<24 x i32> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB17_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
+; GFX11-NEXT: s_cbranch_vccz .LBB17_2
+; GFX11-NEXT: s_branch .LBB17_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9301,6 +9374,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v40
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
@@ -9673,7 +9747,9 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v31, v40
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v48f16_to_v24i32_scalar:
; VI: ; %bb.0:
@@ -9693,6 +9769,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -9703,7 +9780,7 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB19_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -9732,41 +9809,41 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -9787,13 +9864,13 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: .LBB19_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v13, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s43
+; VI-NEXT: v_mov_b32_e32 v2, s42
; VI-NEXT: v_add_f16_sdwa v0, v0, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v1, s16, v13
+; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v3, s17, v13
; VI-NEXT: v_or_b32_e32 v0, v1, v0
-; VI-NEXT: v_mov_b32_e32 v1, s42
-; VI-NEXT: v_add_f16_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v2, s17, v13
-; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_or_b32_e32 v1, v3, v2
; VI-NEXT: v_mov_b32_e32 v2, s41
; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v3, s18, v13
@@ -9877,21 +9954,13 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB19_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB19_2
+; VI-NEXT: s_branch .LBB19_3
;
; GFX9-LABEL: bitcast_v48f16_to_v24i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -9907,6 +9976,17 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -9921,7 +10001,6 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -9936,12 +10015,11 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB19_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -9955,8 +10033,10 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -9984,9 +10064,9 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
@@ -10029,7 +10109,9 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB19_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB19_2
+; GFX9-NEXT: s_branch .LBB19_3
;
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -10053,44 +10135,44 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -10099,17 +10181,16 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -10118,21 +10199,21 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -10145,7 +10226,9 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-TRUE16-NEXT: s_branch .LBB19_3
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -10163,44 +10246,44 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -10209,17 +10292,16 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -10228,21 +10310,21 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -10255,7 +10337,9 @@ define inreg <24 x i32> @bitcast_v48f16_to_v24i32_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB19_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB19_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-FAKE16-NEXT: s_branch .LBB19_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10432,6 +10516,7 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -10452,15 +10537,18 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v23, 1.0, v23
; SI-NEXT: v_add_f32_e32 v22, 1.0, v22
; SI-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -10485,15 +10573,14 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v24f32_to_v12i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -10514,15 +10601,18 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -10547,15 +10637,14 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v24f32_to_v12i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -10576,15 +10665,18 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -10609,39 +10701,37 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v24f32_to_v12i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB21_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: .LBB21_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
@@ -10654,6 +10744,7 @@ define inreg <12 x i64> @bitcast_v24f32_to_v12i64_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10849,6 +10940,7 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -10869,15 +10961,18 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
; SI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
; SI-NEXT: v_add_i32_e32 v20, vcc, 3, v20
@@ -10902,15 +10997,14 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v12i64_to_v24f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -10931,15 +11025,18 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
; VI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
; VI-NEXT: v_add_u32_e32 v20, vcc, 3, v20
@@ -10964,15 +11061,14 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v12i64_to_v24f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -10993,15 +11089,18 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 3, v22
; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v23, vcc
; GFX9-NEXT: v_add_co_u32_e32 v20, vcc, 3, v20
@@ -11026,39 +11125,37 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v12i64_to_v24f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB23_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: .LBB23_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
@@ -11089,6 +11186,7 @@ define inreg <24 x float> @bitcast_v12i64_to_v24f32_scalar(<12 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11266,6 +11364,7 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -11286,15 +11385,18 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_3
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v23, 1.0, v23
; SI-NEXT: v_add_f32_e32 v22, 1.0, v22
; SI-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -11319,15 +11421,14 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB25_3: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v24f32_to_v12f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -11348,15 +11449,18 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -11381,15 +11485,14 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v24f32_to_v12f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -11410,15 +11513,18 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -11443,39 +11549,37 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v24f32_to_v12f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB25_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: .LBB25_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v18, 1.0, v18
@@ -11488,6 +11592,7 @@ define inreg <12 x double> @bitcast_v24f32_to_v12f64_scalar(<24 x float> inreg %
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11629,6 +11734,7 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -11651,13 +11757,16 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -11670,15 +11779,14 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v12f64_to_v24f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -11701,13 +11809,16 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -11720,15 +11831,14 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v12f64_to_v24f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -11751,13 +11861,16 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -11770,39 +11883,37 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v12f64_to_v24f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB27_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: .LBB27_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
@@ -11815,6 +11926,7 @@ define inreg <24 x float> @bitcast_v12f64_to_v24f32_scalar(<12 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12563,6 +12675,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v26, s16
; SI-NEXT: v_mov_b32_e32 v24, s17
; SI-NEXT: v_mov_b32_e32 v23, s18
@@ -12572,7 +12685,7 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s22
; SI-NEXT: v_mov_b32_e32 v15, s23
; SI-NEXT: v_mov_b32_e32 v17, s24
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v16, s25
; SI-NEXT: v_mov_b32_e32 v14, s26
; SI-NEXT: v_mov_b32_e32 v13, s27
@@ -12828,27 +12941,30 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v24f32_to_v48i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, s16
; VI-NEXT: v_mov_b32_e32 v22, s17
-; VI-NEXT: v_mov_b32_e32 v21, s18
-; VI-NEXT: v_mov_b32_e32 v19, s19
-; VI-NEXT: v_mov_b32_e32 v17, s20
-; VI-NEXT: v_mov_b32_e32 v15, s21
+; VI-NEXT: v_mov_b32_e32 v20, s18
+; VI-NEXT: v_mov_b32_e32 v18, s19
+; VI-NEXT: v_mov_b32_e32 v16, s20
+; VI-NEXT: v_mov_b32_e32 v14, s21
; VI-NEXT: v_mov_b32_e32 v13, s22
; VI-NEXT: v_mov_b32_e32 v12, s23
; VI-NEXT: v_mov_b32_e32 v11, s24
; VI-NEXT: v_mov_b32_e32 v10, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_mov_b32_e32 v20, s26
-; VI-NEXT: v_mov_b32_e32 v18, s27
-; VI-NEXT: v_mov_b32_e32 v16, s28
-; VI-NEXT: v_mov_b32_e32 v14, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_mov_b32_e32 v21, s26
+; VI-NEXT: v_mov_b32_e32 v19, s27
+; VI-NEXT: v_mov_b32_e32 v17, s28
+; VI-NEXT: v_mov_b32_e32 v15, s29
; VI-NEXT: s_cbranch_scc0 .LBB29_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -12861,18 +12977,18 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; VI-NEXT: s_cbranch_execnz .LBB29_3
@@ -12887,18 +13003,18 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
-; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
-; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
-; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
-; VI-NEXT: v_add_f32_e32 v10, 1.0, v10
-; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
-; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
-; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
+; VI-NEXT: v_add_f32_e32 v10, 1.0, v10
+; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
+; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
+; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
+; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
+; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
+; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -12911,18 +13027,18 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; VI-NEXT: .LBB29_3: ; %end
@@ -12931,44 +13047,44 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v23, 16, v31
; VI-NEXT: v_or_b32_sdwa v31, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v22, 16, v32
-; VI-NEXT: v_or_b32_sdwa v32, v21, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v21, 16, v33
-; VI-NEXT: v_or_b32_sdwa v33, v19, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v24
-; VI-NEXT: v_or_b32_sdwa v24, v17, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v25
-; VI-NEXT: v_or_b32_sdwa v25, v15, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v26
-; VI-NEXT: v_or_b32_sdwa v26, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v32, v20, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v33
+; VI-NEXT: v_or_b32_sdwa v33, v18, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v24
+; VI-NEXT: v_or_b32_sdwa v24, v16, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; VI-NEXT: v_or_b32_sdwa v25, v14, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v26
+; VI-NEXT: v_or_b32_sdwa v26, v13, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v27
+; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v51
; VI-NEXT: v_or_b32_sdwa v27, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v52
-; VI-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v51
-; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
; VI-NEXT: v_or_b32_sdwa v14, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v50
-; VI-NEXT: v_or_b32_sdwa v28, v11, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v53
+; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
+; VI-NEXT: v_or_b32_sdwa v13, v15, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v49
-; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v29
-; VI-NEXT: v_or_b32_sdwa v12, v16, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v28, v11, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v53
; VI-NEXT: v_or_b32_sdwa v16, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v48
-; VI-NEXT: v_or_b32_sdwa v29, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v54
+; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v29
+; VI-NEXT: v_or_b32_sdwa v12, v17, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v39
-; VI-NEXT: v_or_b32_sdwa v11, v18, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v29, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v54
; VI-NEXT: v_or_b32_sdwa v18, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v38
-; VI-NEXT: v_lshlrev_b32_e32 v10, 16, v55
+; VI-NEXT: v_or_b32_sdwa v11, v19, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v37
-; VI-NEXT: v_or_b32_sdwa v10, v20, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v10, 16, v55
; VI-NEXT: v_or_b32_sdwa v20, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v36
+; VI-NEXT: v_or_b32_sdwa v10, v21, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
; VI-NEXT: v_or_b32_sdwa v22, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -13010,27 +13126,30 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr36
; VI-NEXT: ; implicit-def: $vgpr35
; VI-NEXT: ; implicit-def: $vgpr34
-; VI-NEXT: s_branch .LBB29_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB29_2
+; VI-NEXT: s_branch .LBB29_3
;
; GFX9-LABEL: bitcast_v24f32_to_v48i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, s16
; GFX9-NEXT: v_mov_b32_e32 v22, s17
-; GFX9-NEXT: v_mov_b32_e32 v21, s18
-; GFX9-NEXT: v_mov_b32_e32 v19, s19
-; GFX9-NEXT: v_mov_b32_e32 v17, s20
-; GFX9-NEXT: v_mov_b32_e32 v15, s21
+; GFX9-NEXT: v_mov_b32_e32 v20, s18
+; GFX9-NEXT: v_mov_b32_e32 v18, s19
+; GFX9-NEXT: v_mov_b32_e32 v16, s20
+; GFX9-NEXT: v_mov_b32_e32 v14, s21
; GFX9-NEXT: v_mov_b32_e32 v13, s22
; GFX9-NEXT: v_mov_b32_e32 v12, s23
; GFX9-NEXT: v_mov_b32_e32 v11, s24
; GFX9-NEXT: v_mov_b32_e32 v10, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_mov_b32_e32 v20, s26
-; GFX9-NEXT: v_mov_b32_e32 v18, s27
-; GFX9-NEXT: v_mov_b32_e32 v16, s28
-; GFX9-NEXT: v_mov_b32_e32 v14, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_mov_b32_e32 v21, s26
+; GFX9-NEXT: v_mov_b32_e32 v19, s27
+; GFX9-NEXT: v_mov_b32_e32 v17, s28
+; GFX9-NEXT: v_mov_b32_e32 v15, s29
; GFX9-NEXT: s_cbranch_scc0 .LBB29_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -13043,18 +13162,18 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; GFX9-NEXT: s_cbranch_execnz .LBB29_3
@@ -13069,18 +13188,18 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
-; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
-; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
-; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
-; GFX9-NEXT: v_add_f32_e32 v10, 1.0, v10
-; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
-; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
-; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
+; GFX9-NEXT: v_add_f32_e32 v10, 1.0, v10
+; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
+; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
+; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
+; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
+; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
+; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -13093,58 +13212,58 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; GFX9-NEXT: .LBB29_3: ; %end
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v14
+; GFX9-NEXT: v_lshl_or_b32 v25, v25, 16, v14
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX9-NEXT: v_lshl_or_b32 v25, v25, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v13
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v16
+; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v16
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v17
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v12
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v17
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v4
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v18
+; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v18
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v19
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v11
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v19
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v10
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v20
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX9-NEXT: v_lshl_or_b32 v20, v37, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v21
+; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v21
; GFX9-NEXT: v_lshl_or_b32 v21, v36, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v8
; GFX9-NEXT: v_lshl_or_b32 v30, v30, 16, v23
@@ -13192,7 +13311,9 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr36
; GFX9-NEXT: ; implicit-def: $vgpr35
; GFX9-NEXT: ; implicit-def: $vgpr34
-; GFX9-NEXT: s_branch .LBB29_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB29_2
+; GFX9-NEXT: s_branch .LBB29_3
;
; GFX11-LABEL: bitcast_v24f32_to_v48i16_scalar:
; GFX11: ; %bb.0:
@@ -13205,10 +13326,10 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
@@ -13217,10 +13338,10 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -13235,14 +13356,13 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-NEXT: s_cbranch_execnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
@@ -13256,10 +13376,10 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -13283,31 +13403,29 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v18
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
; GFX11-NEXT: v_lshl_or_b32 v10, v51, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
; GFX11-NEXT: v_lshl_or_b32 v12, v49, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v21
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v18
+; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v7
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
@@ -13317,9 +13435,11 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
; GFX11-NEXT: v_lshl_or_b32 v7, v54, 16, v8
; GFX11-NEXT: v_lshl_or_b32 v8, v53, 16, v21
+; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
; GFX11-NEXT: v_lshl_or_b32 v11, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v19
+; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
+; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
@@ -13353,7 +13473,9 @@ define inreg <48 x i16> @bitcast_v24f32_to_v48i16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr32
; GFX11-NEXT: ; implicit-def: $vgpr31
; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB29_2
+; GFX11-NEXT: s_branch .LBB29_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14458,6 +14580,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v49, v10
; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1
; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v3
@@ -14479,7 +14602,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4
; SI-NEXT: s_cbranch_scc0 .LBB31_4
@@ -14741,7 +14864,9 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v27, v46
; SI-NEXT: v_mov_b32_e32 v29, v45
; SI-NEXT: v_mov_b32_e32 v34, v43
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v48i16_to_v24f32_scalar:
; VI: ; %bb.0:
@@ -14761,6 +14886,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -14771,7 +14897,7 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB31_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -14800,41 +14926,41 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -14857,97 +14983,98 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v48
+; VI-NEXT: v_lshlrev_b32_sdwa v3, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v2
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_and_b32 s4, s16, 0xffff
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_add_i32 s17, s17, 3
-; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -14962,7 +15089,6 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -14982,21 +15108,13 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB31_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB31_2
+; VI-NEXT: s_branch .LBB31_3
;
; GFX9-LABEL: bitcast_v48i16_to_v24f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -15012,6 +15130,17 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -15026,7 +15155,6 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -15041,12 +15169,11 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB31_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -15060,8 +15187,10 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -15079,26 +15208,30 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB31_3
; GFX9-NEXT: .LBB31_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v49
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v48
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v39
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v38
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
+; GFX9-NEXT: v_lshl_or_b32 v0, v43, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v41, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v37
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v36
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v35
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
+; GFX9-NEXT: v_lshl_or_b32 v1, v42, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v40, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v18, v55, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v19, v54, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v20, v53, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v23
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -15113,10 +15246,6 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -15132,7 +15261,9 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB31_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB31_2
+; GFX9-NEXT: s_branch .LBB31_3
;
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v24f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -15156,44 +15287,44 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -15202,17 +15333,16 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -15221,21 +15351,21 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -15248,7 +15378,9 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB31_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-TRUE16-NEXT: s_branch .LBB31_3
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v24f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -15266,44 +15398,44 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -15312,17 +15444,16 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-FAKE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -15331,21 +15462,21 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -15358,7 +15489,9 @@ define inreg <24 x float> @bitcast_v48i16_to_v24f32_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB31_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB31_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-FAKE16-NEXT: s_branch .LBB31_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -16351,17 +16484,18 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s15, v1
; SI-NEXT: v_readfirstlane_b32 s14, v2
; SI-NEXT: v_readfirstlane_b32 s13, v3
; SI-NEXT: v_readfirstlane_b32 s12, v4
; SI-NEXT: v_readfirstlane_b32 s11, v5
; SI-NEXT: v_readfirstlane_b32 s10, v6
-; SI-NEXT: v_readfirstlane_b32 s8, v7
-; SI-NEXT: v_readfirstlane_b32 s7, v8
-; SI-NEXT: v_readfirstlane_b32 s6, v9
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v10
+; SI-NEXT: v_readfirstlane_b32 s9, v7
+; SI-NEXT: v_readfirstlane_b32 s8, v8
+; SI-NEXT: v_readfirstlane_b32 s7, v9
+; SI-NEXT: v_readfirstlane_b32 s6, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
@@ -16377,13 +16511,13 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -16425,10 +16559,10 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v54, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v40, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s9
; SI-NEXT: v_cvt_f32_f16_e32 v11, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -16471,10 +16605,10 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v15, s12, 1.0
; SI-NEXT: v_add_f32_e64 v13, s11, 1.0
; SI-NEXT: v_add_f32_e64 v11, s10, 1.0
-; SI-NEXT: v_add_f32_e64 v9, s8, 1.0
-; SI-NEXT: v_add_f32_e64 v8, s7, 1.0
-; SI-NEXT: v_add_f32_e64 v6, s6, 1.0
-; SI-NEXT: v_add_f32_e64 v4, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v9, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v8, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v6, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v4, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v52, 16, v3
@@ -16785,27 +16919,30 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v24f32_to_v48f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, s16
; VI-NEXT: v_mov_b32_e32 v22, s17
-; VI-NEXT: v_mov_b32_e32 v21, s18
-; VI-NEXT: v_mov_b32_e32 v19, s19
-; VI-NEXT: v_mov_b32_e32 v17, s20
-; VI-NEXT: v_mov_b32_e32 v15, s21
+; VI-NEXT: v_mov_b32_e32 v20, s18
+; VI-NEXT: v_mov_b32_e32 v18, s19
+; VI-NEXT: v_mov_b32_e32 v16, s20
+; VI-NEXT: v_mov_b32_e32 v14, s21
; VI-NEXT: v_mov_b32_e32 v13, s22
; VI-NEXT: v_mov_b32_e32 v12, s23
; VI-NEXT: v_mov_b32_e32 v11, s24
; VI-NEXT: v_mov_b32_e32 v10, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_mov_b32_e32 v20, s26
-; VI-NEXT: v_mov_b32_e32 v18, s27
-; VI-NEXT: v_mov_b32_e32 v16, s28
-; VI-NEXT: v_mov_b32_e32 v14, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: v_mov_b32_e32 v21, s26
+; VI-NEXT: v_mov_b32_e32 v19, s27
+; VI-NEXT: v_mov_b32_e32 v17, s28
+; VI-NEXT: v_mov_b32_e32 v15, s29
; VI-NEXT: s_cbranch_scc0 .LBB33_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -16818,18 +16955,18 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; VI-NEXT: s_cbranch_execnz .LBB33_3
@@ -16844,18 +16981,18 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
-; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
-; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
-; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
-; VI-NEXT: v_add_f32_e32 v10, 1.0, v10
-; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
-; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
-; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
+; VI-NEXT: v_add_f32_e32 v10, 1.0, v10
+; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
+; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
+; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
+; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
+; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
+; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -16868,18 +17005,18 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; VI-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; VI-NEXT: .LBB33_3: ; %end
@@ -16888,44 +17025,44 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v23, 16, v31
; VI-NEXT: v_or_b32_sdwa v31, v22, v23 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v22, 16, v32
-; VI-NEXT: v_or_b32_sdwa v32, v21, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v21, 16, v33
-; VI-NEXT: v_or_b32_sdwa v33, v19, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v24
-; VI-NEXT: v_or_b32_sdwa v24, v17, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v25
-; VI-NEXT: v_or_b32_sdwa v25, v15, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v26
-; VI-NEXT: v_or_b32_sdwa v26, v13, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v32, v20, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v33
+; VI-NEXT: v_or_b32_sdwa v33, v18, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v24
+; VI-NEXT: v_or_b32_sdwa v24, v16, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v25
+; VI-NEXT: v_or_b32_sdwa v25, v14, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v26
+; VI-NEXT: v_or_b32_sdwa v26, v13, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v27
+; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v51
; VI-NEXT: v_or_b32_sdwa v27, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v52
-; VI-NEXT: v_or_b32_sdwa v13, v14, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v51
-; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
; VI-NEXT: v_or_b32_sdwa v14, v0, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v50
-; VI-NEXT: v_or_b32_sdwa v28, v11, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v53
+; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
+; VI-NEXT: v_or_b32_sdwa v13, v15, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v15, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v49
-; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v29
-; VI-NEXT: v_or_b32_sdwa v12, v16, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v28, v11, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v12, 16, v53
; VI-NEXT: v_or_b32_sdwa v16, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v48
-; VI-NEXT: v_or_b32_sdwa v29, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v54
+; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v29
+; VI-NEXT: v_or_b32_sdwa v12, v17, v12 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v39
-; VI-NEXT: v_or_b32_sdwa v11, v18, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v29, v10, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v11, 16, v54
; VI-NEXT: v_or_b32_sdwa v18, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v38
-; VI-NEXT: v_lshlrev_b32_e32 v10, 16, v55
+; VI-NEXT: v_or_b32_sdwa v11, v19, v11 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v19, v5, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v37
-; VI-NEXT: v_or_b32_sdwa v10, v20, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v10, 16, v55
; VI-NEXT: v_or_b32_sdwa v20, v6, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v36
+; VI-NEXT: v_or_b32_sdwa v10, v21, v10 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v21, v7, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v35
; VI-NEXT: v_or_b32_sdwa v22, v8, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -16967,27 +17104,30 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr36
; VI-NEXT: ; implicit-def: $vgpr35
; VI-NEXT: ; implicit-def: $vgpr34
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
+; VI-NEXT: s_branch .LBB33_3
;
; GFX9-LABEL: bitcast_v24f32_to_v48f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, s16
; GFX9-NEXT: v_mov_b32_e32 v22, s17
-; GFX9-NEXT: v_mov_b32_e32 v21, s18
-; GFX9-NEXT: v_mov_b32_e32 v19, s19
-; GFX9-NEXT: v_mov_b32_e32 v17, s20
-; GFX9-NEXT: v_mov_b32_e32 v15, s21
+; GFX9-NEXT: v_mov_b32_e32 v20, s18
+; GFX9-NEXT: v_mov_b32_e32 v18, s19
+; GFX9-NEXT: v_mov_b32_e32 v16, s20
+; GFX9-NEXT: v_mov_b32_e32 v14, s21
; GFX9-NEXT: v_mov_b32_e32 v13, s22
; GFX9-NEXT: v_mov_b32_e32 v12, s23
; GFX9-NEXT: v_mov_b32_e32 v11, s24
; GFX9-NEXT: v_mov_b32_e32 v10, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_mov_b32_e32 v20, s26
-; GFX9-NEXT: v_mov_b32_e32 v18, s27
-; GFX9-NEXT: v_mov_b32_e32 v16, s28
-; GFX9-NEXT: v_mov_b32_e32 v14, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: v_mov_b32_e32 v21, s26
+; GFX9-NEXT: v_mov_b32_e32 v19, s27
+; GFX9-NEXT: v_mov_b32_e32 v17, s28
+; GFX9-NEXT: v_mov_b32_e32 v15, s29
; GFX9-NEXT: s_cbranch_scc0 .LBB33_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -17000,18 +17140,18 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; GFX9-NEXT: s_cbranch_execnz .LBB33_3
@@ -17026,18 +17166,18 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
-; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
-; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
-; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
-; GFX9-NEXT: v_add_f32_e32 v10, 1.0, v10
-; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
-; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
-; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
+; GFX9-NEXT: v_add_f32_e32 v10, 1.0, v10
+; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
+; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
+; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
+; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
+; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
+; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -17050,58 +17190,58 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v16
-; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX9-NEXT: v_lshrrev_b32_e32 v53, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v13
-; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v17
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v21
+; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v14
+; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
+; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v23
; GFX9-NEXT: .LBB33_3: ; %end
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
-; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v14
+; GFX9-NEXT: v_lshl_or_b32 v25, v25, 16, v14
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX9-NEXT: v_lshl_or_b32 v14, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
-; GFX9-NEXT: v_lshl_or_b32 v25, v25, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v13
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v15, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v12
-; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v16
+; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v16
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX9-NEXT: v_lshl_or_b32 v16, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v3
-; GFX9-NEXT: v_lshl_or_b32 v24, v24, 16, v17
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
+; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
+; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v12
+; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v17
; GFX9-NEXT: v_lshl_or_b32 v17, v48, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v4
-; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v11
-; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v18
+; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v18
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX9-NEXT: v_lshl_or_b32 v18, v39, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v19
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10
+; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v11
+; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v19
; GFX9-NEXT: v_lshl_or_b32 v19, v38, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v10
-; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v20
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX9-NEXT: v_lshl_or_b32 v20, v37, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v23
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
-; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v21
+; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v10
+; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v21
; GFX9-NEXT: v_lshl_or_b32 v21, v36, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v8
; GFX9-NEXT: v_lshl_or_b32 v30, v30, 16, v23
@@ -17149,7 +17289,9 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr36
; GFX9-NEXT: ; implicit-def: $vgpr35
; GFX9-NEXT: ; implicit-def: $vgpr34
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
+; GFX9-NEXT: s_branch .LBB33_3
;
; GFX11-LABEL: bitcast_v24f32_to_v48f16_scalar:
; GFX11: ; %bb.0:
@@ -17162,10 +17304,10 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v8, s19 :: v_dual_mov_b32 v13, s21
; GFX11-NEXT: v_dual_mov_b32 v12, s22 :: v_dual_mov_b32 v11, s23
; GFX11-NEXT: v_dual_mov_b32 v10, s24 :: v_dual_mov_b32 v9, s25
-; GFX11-NEXT: v_dual_mov_b32 v15, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
@@ -17174,10 +17316,10 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -17192,14 +17334,13 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
; GFX11-NEXT: v_lshrrev_b32_e32 v25, 16, v23
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-NEXT: s_cbranch_execnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v14, 1.0, v14 :: v_dual_add_f32 v15, 1.0, v15
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v8, 1.0, v8
@@ -17213,10 +17354,10 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v35, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v39, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v48, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -17240,31 +17381,29 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v6, v6, 16, v18
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v10
; GFX11-NEXT: v_lshl_or_b32 v10, v51, 16, v12
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v25, v25, 16, v23
; GFX11-NEXT: v_lshl_or_b32 v12, v49, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
; GFX11-NEXT: v_lshl_or_b32 v27, v27, 16, v21
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v18
+; GFX11-NEXT: v_lshl_or_b32 v15, v38, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v35, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v1
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v2
; GFX11-NEXT: v_lshl_or_b32 v26, v26, 16, v22
; GFX11-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v7
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
-; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v9
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v3
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v4
; GFX11-NEXT: v_dual_mov_b32 v5, v29 :: v_dual_and_b32 v4, 0xffff, v5
@@ -17274,9 +17413,11 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v24
; GFX11-NEXT: v_lshl_or_b32 v7, v54, 16, v8
; GFX11-NEXT: v_lshl_or_b32 v8, v53, 16, v21
+; GFX11-NEXT: v_lshl_or_b32 v9, v52, 16, v13
; GFX11-NEXT: v_lshl_or_b32 v11, v50, 16, v11
-; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v36, 16, v19
+; GFX11-NEXT: v_lshl_or_b32 v13, v48, 16, v19
+; GFX11-NEXT: v_lshl_or_b32 v14, v39, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v16, v37, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v19, v34, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v21, v32, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v22, v31, 16, v3
@@ -17310,7 +17451,9 @@ define inreg <48 x half> @bitcast_v24f32_to_v48f16_scalar(<24 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr32
; GFX11-NEXT: ; implicit-def: $vgpr31
; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
+; GFX11-NEXT: s_branch .LBB33_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18633,6 +18776,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v40
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
@@ -19005,7 +19149,9 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v31, v40
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v48f16_to_v24f32_scalar:
; VI: ; %bb.0:
@@ -19025,6 +19171,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -19035,7 +19182,7 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB35_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -19064,41 +19211,41 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -19119,13 +19266,13 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: .LBB35_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v13, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s43
+; VI-NEXT: v_mov_b32_e32 v2, s42
; VI-NEXT: v_add_f16_sdwa v0, v0, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v1, s16, v13
+; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v3, s17, v13
; VI-NEXT: v_or_b32_e32 v0, v1, v0
-; VI-NEXT: v_mov_b32_e32 v1, s42
-; VI-NEXT: v_add_f16_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v2, s17, v13
-; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_or_b32_e32 v1, v3, v2
; VI-NEXT: v_mov_b32_e32 v2, s41
; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v3, s18, v13
@@ -19209,21 +19356,13 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v48f16_to_v24f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -19239,6 +19378,17 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -19253,7 +19403,6 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -19268,12 +19417,11 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB35_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -19287,8 +19435,10 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -19316,9 +19466,9 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
@@ -19361,7 +19511,9 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v24f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -19385,44 +19537,44 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -19431,17 +19583,16 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -19450,21 +19601,21 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -19477,7 +19628,9 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-TRUE16-NEXT: s_branch .LBB35_3
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v24f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -19495,44 +19648,44 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -19541,17 +19694,16 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -19560,21 +19712,21 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -19587,7 +19739,9 @@ define inreg <24 x float> @bitcast_v48f16_to_v24f32_scalar(<48 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB35_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-FAKE16-NEXT: s_branch .LBB35_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19782,6 +19936,7 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -19802,15 +19957,18 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
; SI-NEXT: v_mov_b32_e32 v9, s25
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -19835,15 +19993,14 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
; SI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v12i64_to_v12f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -19864,15 +20021,18 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
; VI-NEXT: v_mov_b32_e32 v9, s25
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -19897,15 +20057,14 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v21, vcc, 0, v21, vcc
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
; VI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v12i64_to_v12f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -19926,15 +20085,18 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
; GFX9-NEXT: v_mov_b32_e32 v9, s25
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -19959,39 +20121,37 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v21, vcc, 0, v21, vcc
; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 3, v22
; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v23, vcc
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v12i64_to_v12f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB37_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: .LBB37_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -20022,6 +20182,7 @@ define inreg <12 x double> @bitcast_v12i64_to_v12f64_scalar(<12 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v21, null, 0, v21, vcc_lo
; GFX11-NEXT: v_add_co_u32 v22, vcc_lo, v22, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v23, null, 0, v23, vcc_lo
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20163,6 +20324,7 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, v9
; SI-NEXT: v_mov_b32_e32 v22, v8
; SI-NEXT: v_mov_b32_e32 v21, v7
@@ -20185,13 +20347,16 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -20204,15 +20369,14 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; SI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v12f64_to_v12i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, v9
; VI-NEXT: v_mov_b32_e32 v22, v8
; VI-NEXT: v_mov_b32_e32 v21, v7
@@ -20235,13 +20399,16 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -20254,15 +20421,14 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; VI-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v12f64_to_v12i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, v9
; GFX9-NEXT: v_mov_b32_e32 v22, v8
; GFX9-NEXT: v_mov_b32_e32 v21, v7
@@ -20285,13 +20451,16 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -20304,39 +20473,37 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v12f64_to_v12i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
+; GFX11-NEXT: v_dual_mov_b32 v15, v6 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB39_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: .LBB39_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -20349,6 +20516,7 @@ define inreg <12 x i64> @bitcast_v12f64_to_v12i64_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[18:19], v[18:19], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21133,6 +21301,7 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s15, v1
; SI-NEXT: v_readfirstlane_b32 s14, v2
; SI-NEXT: v_readfirstlane_b32 s13, v3
@@ -21142,8 +21311,8 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v7
; SI-NEXT: v_readfirstlane_b32 s8, v8
; SI-NEXT: v_readfirstlane_b32 s7, v9
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -21415,12 +21584,15 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr41
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr40
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v12i64_to_v48i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s15, v0
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: v_readfirstlane_b32 s13, v2
@@ -21429,13 +21601,13 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v5
; VI-NEXT: v_readfirstlane_b32 s9, v6
; VI-NEXT: v_readfirstlane_b32 s8, v7
-; VI-NEXT: v_readfirstlane_b32 s6, v8
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v9
+; VI-NEXT: v_readfirstlane_b32 s7, v8
+; VI-NEXT: v_readfirstlane_b32 s6, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -21460,8 +21632,8 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: s_lshr_b32 s79, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -21484,8 +21656,8 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -21575,12 +21747,12 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s42, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s28, s41, 16
-; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_lshl_b32 s28, s41, 16
; VI-NEXT: s_or_b32 s7, s7, s28
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -21603,8 +21775,8 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v19, s10
; VI-NEXT: v_mov_b32_e32 v20, s9
; VI-NEXT: v_mov_b32_e32 v21, s8
-; VI-NEXT: v_mov_b32_e32 v22, s6
-; VI-NEXT: v_mov_b32_e32 v23, s7
+; VI-NEXT: v_mov_b32_e32 v22, s7
+; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_4:
; VI-NEXT: ; implicit-def: $sgpr79
@@ -21631,35 +21803,38 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr42
; VI-NEXT: ; implicit-def: $sgpr41
; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v12i64_to_v48i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s6, v9
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -21676,16 +21851,16 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -21700,16 +21875,16 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -21739,16 +21914,16 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s60
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s40
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -21763,16 +21938,16 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: ; implicit-def: $sgpr79
@@ -21799,7 +21974,9 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr42
; GFX9-NEXT: ; implicit-def: $sgpr41
; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v12i64_to_v48i16_scalar:
; GFX11: ; %bb.0:
@@ -21808,16 +21985,16 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: v_readfirstlane_b32 s5, v1
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-NEXT: v_readfirstlane_b32 s8, v3
; GFX11-NEXT: v_readfirstlane_b32 s9, v4
-; GFX11-NEXT: v_readfirstlane_b32 s8, v5
-; GFX11-NEXT: s_mov_b32 s74, 0
+; GFX11-NEXT: v_readfirstlane_b32 s7, v5
+; GFX11-NEXT: s_mov_b32 s74, -1
; GFX11-NEXT: s_and_b32 s10, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -21839,13 +22016,12 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s63, s2, 16
; GFX11-NEXT: s_lshr_b32 s72, s1, 16
; GFX11-NEXT: s_lshr_b32 s73, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s9, s9, 3
-; GFX11-NEXT: s_addc_u32 s8, s8, 0
-; GFX11-NEXT: s_add_u32 s6, s6, 3
; GFX11-NEXT: s_addc_u32 s7, s7, 0
+; GFX11-NEXT: s_add_u32 s6, s6, 3
+; GFX11-NEXT: s_addc_u32 s8, s8, 0
; GFX11-NEXT: s_add_u32 s4, s4, 3
; GFX11-NEXT: s_addc_u32 s5, s5, 0
; GFX11-NEXT: s_add_u32 s28, s28, 3
@@ -21866,9 +22042,9 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -21913,9 +22089,9 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s10
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -21926,8 +22102,8 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s8
+; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s8
+; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s7
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: ; implicit-def: $sgpr73
@@ -21954,7 +22130,9 @@ define inreg <48 x i16> @bitcast_v12i64_to_v48i16_scalar(<12 x i64> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23059,6 +23237,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v49, v10
; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1
; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v3
@@ -23080,7 +23259,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4
; SI-NEXT: s_cbranch_scc0 .LBB43_4
@@ -23342,7 +23521,9 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v27, v46
; SI-NEXT: v_mov_b32_e32 v29, v45
; SI-NEXT: v_mov_b32_e32 v34, v43
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v48i16_to_v12i64_scalar:
; VI: ; %bb.0:
@@ -23362,6 +23543,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -23372,7 +23554,7 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -23401,41 +23583,41 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -23458,97 +23640,98 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v48
+; VI-NEXT: v_lshlrev_b32_sdwa v3, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v2
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_and_b32 s4, s16, 0xffff
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_add_i32 s17, s17, 3
-; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -23563,7 +23746,6 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -23583,21 +23765,13 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v48i16_to_v12i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -23613,6 +23787,17 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -23627,7 +23812,6 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -23642,12 +23826,11 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB43_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -23661,8 +23844,10 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -23680,26 +23865,30 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB43_3
; GFX9-NEXT: .LBB43_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v49
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v48
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v39
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v38
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
+; GFX9-NEXT: v_lshl_or_b32 v0, v43, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v41, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v37
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v36
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v35
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
+; GFX9-NEXT: v_lshl_or_b32 v1, v42, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v40, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v18, v55, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v19, v54, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v20, v53, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v23
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -23714,10 +23903,6 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -23733,7 +23918,9 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -23757,44 +23944,44 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -23803,17 +23990,16 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -23822,21 +24008,21 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -23849,7 +24035,9 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-TRUE16-NEXT: s_branch .LBB43_3
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -23867,44 +24055,44 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -23913,17 +24101,16 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -23932,21 +24119,21 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -23959,7 +24146,9 @@ define inreg <12 x i64> @bitcast_v48i16_to_v12i64_scalar(<48 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB43_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB43_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-FAKE16-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -24988,27 +25177,28 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: v_readfirstlane_b32 s15, v2
; SI-NEXT: v_readfirstlane_b32 s12, v3
; SI-NEXT: v_readfirstlane_b32 s13, v4
; SI-NEXT: v_readfirstlane_b32 s10, v5
; SI-NEXT: v_readfirstlane_b32 s11, v6
-; SI-NEXT: v_readfirstlane_b32 s7, v7
-; SI-NEXT: v_readfirstlane_b32 s8, v8
+; SI-NEXT: v_readfirstlane_b32 s8, v7
+; SI-NEXT: v_readfirstlane_b32 s9, v8
; SI-NEXT: v_readfirstlane_b32 s6, v9
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v10
+; SI-NEXT: v_readfirstlane_b32 s7, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s11, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -25051,10 +25241,10 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v40, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s13
@@ -25117,18 +25307,18 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; SI-NEXT: s_addc_u32 s11, s11, 0
; SI-NEXT: s_lshr_b32 s72, s10, 16
; SI-NEXT: s_lshr_b32 s73, s11, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s74, s7, 16
-; SI-NEXT: s_lshr_b32 s75, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: s_lshr_b32 s74, s8, 16
+; SI-NEXT: s_lshr_b32 s75, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: s_lshr_b32 s76, s6, 16
-; SI-NEXT: s_lshr_b32 s77, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: s_lshr_b32 s77, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v9, s8
; SI-NEXT: v_cvt_f32_f16_e32 v11, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s13
@@ -25394,12 +25584,15 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v12i64_to_v48f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s15, v0
; VI-NEXT: v_readfirstlane_b32 s14, v1
; VI-NEXT: v_readfirstlane_b32 s13, v2
@@ -25408,13 +25601,13 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v5
; VI-NEXT: v_readfirstlane_b32 s9, v6
; VI-NEXT: v_readfirstlane_b32 s8, v7
-; VI-NEXT: v_readfirstlane_b32 s6, v8
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v9
+; VI-NEXT: v_readfirstlane_b32 s7, v8
+; VI-NEXT: v_readfirstlane_b32 s6, v9
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -25439,8 +25632,8 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: s_lshr_b32 s79, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -25463,8 +25656,8 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s40, s7, 16
-; VI-NEXT: s_lshr_b32 s41, s6, 16
+; VI-NEXT: s_lshr_b32 s40, s6, 16
+; VI-NEXT: s_lshr_b32 s41, s7, 16
; VI-NEXT: s_lshr_b32 s42, s8, 16
; VI-NEXT: s_lshr_b32 s43, s9, 16
; VI-NEXT: s_lshr_b32 s44, s10, 16
@@ -25554,12 +25747,12 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s28, s42, 16
; VI-NEXT: s_or_b32 s8, s8, s28
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s28, s41, 16
-; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_lshl_b32 s28, s41, 16
; VI-NEXT: s_or_b32 s7, s7, s28
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s28, s40, 16
+; VI-NEXT: s_or_b32 s6, s6, s28
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -25582,8 +25775,8 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v19, s10
; VI-NEXT: v_mov_b32_e32 v20, s9
; VI-NEXT: v_mov_b32_e32 v21, s8
-; VI-NEXT: v_mov_b32_e32 v22, s6
-; VI-NEXT: v_mov_b32_e32 v23, s7
+; VI-NEXT: v_mov_b32_e32 v22, s7
+; VI-NEXT: v_mov_b32_e32 v23, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_4:
; VI-NEXT: ; implicit-def: $sgpr79
@@ -25610,35 +25803,38 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr42
; VI-NEXT: ; implicit-def: $sgpr41
; VI-NEXT: ; implicit-def: $sgpr40
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v12i64_to_v48f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s6, v9
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -25655,16 +25851,16 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s79, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -25679,16 +25875,16 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s40, s15, 16
-; GFX9-NEXT: s_lshr_b32 s41, s14, 16
-; GFX9-NEXT: s_lshr_b32 s42, s13, 16
-; GFX9-NEXT: s_lshr_b32 s43, s12, 16
-; GFX9-NEXT: s_lshr_b32 s44, s11, 16
-; GFX9-NEXT: s_lshr_b32 s45, s10, 16
-; GFX9-NEXT: s_lshr_b32 s46, s9, 16
-; GFX9-NEXT: s_lshr_b32 s47, s8, 16
-; GFX9-NEXT: s_lshr_b32 s56, s7, 16
-; GFX9-NEXT: s_lshr_b32 s57, s6, 16
+; GFX9-NEXT: s_lshr_b32 s40, s6, 16
+; GFX9-NEXT: s_lshr_b32 s41, s15, 16
+; GFX9-NEXT: s_lshr_b32 s42, s14, 16
+; GFX9-NEXT: s_lshr_b32 s43, s13, 16
+; GFX9-NEXT: s_lshr_b32 s44, s12, 16
+; GFX9-NEXT: s_lshr_b32 s45, s11, 16
+; GFX9-NEXT: s_lshr_b32 s46, s10, 16
+; GFX9-NEXT: s_lshr_b32 s47, s9, 16
+; GFX9-NEXT: s_lshr_b32 s56, s8, 16
+; GFX9-NEXT: s_lshr_b32 s57, s7, 16
; GFX9-NEXT: s_lshr_b32 s58, s29, 16
; GFX9-NEXT: s_lshr_b32 s59, s28, 16
; GFX9-NEXT: s_lshr_b32 s60, s27, 16
@@ -25718,16 +25914,16 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s60
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s59
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s42
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s41
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s40
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s41
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s40
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -25742,16 +25938,16 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: ; implicit-def: $sgpr79
@@ -25778,7 +25974,9 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr42
; GFX9-NEXT: ; implicit-def: $sgpr41
; GFX9-NEXT: ; implicit-def: $sgpr40
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v12i64_to_v48f16_scalar:
; GFX11: ; %bb.0:
@@ -25787,16 +25985,16 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s4, v0
; GFX11-NEXT: v_readfirstlane_b32 s5, v1
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
-; GFX11-NEXT: v_readfirstlane_b32 s7, v3
+; GFX11-NEXT: v_readfirstlane_b32 s8, v3
; GFX11-NEXT: v_readfirstlane_b32 s9, v4
-; GFX11-NEXT: v_readfirstlane_b32 s8, v5
-; GFX11-NEXT: s_mov_b32 s74, 0
+; GFX11-NEXT: v_readfirstlane_b32 s7, v5
+; GFX11-NEXT: s_mov_b32 s74, -1
; GFX11-NEXT: s_and_b32 s10, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -25818,13 +26016,12 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s63, s2, 16
; GFX11-NEXT: s_lshr_b32 s72, s1, 16
; GFX11-NEXT: s_lshr_b32 s73, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s9, s9, 3
-; GFX11-NEXT: s_addc_u32 s8, s8, 0
-; GFX11-NEXT: s_add_u32 s6, s6, 3
; GFX11-NEXT: s_addc_u32 s7, s7, 0
+; GFX11-NEXT: s_add_u32 s6, s6, 3
+; GFX11-NEXT: s_addc_u32 s8, s8, 0
; GFX11-NEXT: s_add_u32 s4, s4, 3
; GFX11-NEXT: s_addc_u32 s5, s5, 0
; GFX11-NEXT: s_add_u32 s28, s28, 3
@@ -25845,9 +26042,9 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s10, s8, 16
+; GFX11-NEXT: s_lshr_b32 s10, s7, 16
; GFX11-NEXT: s_lshr_b32 s11, s9, 16
-; GFX11-NEXT: s_lshr_b32 s12, s7, 16
+; GFX11-NEXT: s_lshr_b32 s12, s8, 16
; GFX11-NEXT: s_lshr_b32 s13, s6, 16
; GFX11-NEXT: s_lshr_b32 s14, s5, 16
; GFX11-NEXT: s_lshr_b32 s15, s4, 16
@@ -25892,9 +26089,9 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s4, s4, s15
; GFX11-NEXT: s_pack_ll_b32_b16 s5, s5, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s12
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s11
-; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s10
+; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s10
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -25905,8 +26102,8 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
-; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s8
+; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s8
+; GFX11-NEXT: v_dual_mov_b32 v22, s9 :: v_dual_mov_b32 v23, s7
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: ; implicit-def: $sgpr73
@@ -25933,7 +26130,9 @@ define inreg <48 x half> @bitcast_v12i64_to_v48f16_scalar(<12 x i64> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s74
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27256,6 +27455,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v40
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
@@ -27628,7 +27828,9 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v31, v40
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v48f16_to_v12i64_scalar:
; VI: ; %bb.0:
@@ -27648,6 +27850,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -27658,7 +27861,7 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -27687,41 +27890,41 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -27742,13 +27945,13 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: .LBB47_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v13, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s43
+; VI-NEXT: v_mov_b32_e32 v2, s42
; VI-NEXT: v_add_f16_sdwa v0, v0, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v1, s16, v13
+; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v3, s17, v13
; VI-NEXT: v_or_b32_e32 v0, v1, v0
-; VI-NEXT: v_mov_b32_e32 v1, s42
-; VI-NEXT: v_add_f16_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v2, s17, v13
-; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_or_b32_e32 v1, v3, v2
; VI-NEXT: v_mov_b32_e32 v2, s41
; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v3, s18, v13
@@ -27832,21 +28035,13 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v48f16_to_v12i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -27862,6 +28057,17 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -27876,7 +28082,6 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -27891,12 +28096,11 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB47_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -27910,8 +28114,10 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -27939,9 +28145,9 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
@@ -27984,7 +28190,9 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
+; GFX9-NEXT: s_branch .LBB47_3
;
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -28008,44 +28216,44 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -28054,17 +28262,16 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -28073,21 +28280,21 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -28100,7 +28307,9 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-TRUE16-NEXT: s_branch .LBB47_3
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -28118,44 +28327,44 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -28164,17 +28373,16 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -28183,21 +28391,21 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -28210,7 +28418,9 @@ define inreg <12 x i64> @bitcast_v48f16_to_v12i64_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-FAKE16-NEXT: s_branch .LBB47_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -28922,6 +29132,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v23, s16
; SI-NEXT: v_mov_b32_e32 v24, s17
; SI-NEXT: v_mov_b32_e32 v21, s18
@@ -28934,9 +29145,9 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v16, s25
; SI-NEXT: v_mov_b32_e32 v13, s26
; SI-NEXT: v_mov_b32_e32 v14, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v11, s28
; SI-NEXT: v_mov_b32_e32 v12, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
@@ -29175,12 +29386,15 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr32
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr30
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v12f64_to_v48i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, s16
; VI-NEXT: v_mov_b32_e32 v24, s17
; VI-NEXT: v_mov_b32_e32 v19, s18
@@ -29193,9 +29407,9 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v22, s25
; VI-NEXT: v_mov_b32_e32 v17, s26
; VI-NEXT: v_mov_b32_e32 v18, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s28
; VI-NEXT: v_mov_b32_e32 v14, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB49_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -29345,12 +29559,15 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr36
; VI-NEXT: ; implicit-def: $vgpr35
; VI-NEXT: ; implicit-def: $vgpr34
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v12f64_to_v48i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, s16
; GFX9-NEXT: v_mov_b32_e32 v24, s17
; GFX9-NEXT: v_mov_b32_e32 v19, s18
@@ -29363,9 +29580,9 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v22, s25
; GFX9-NEXT: v_mov_b32_e32 v17, s26
; GFX9-NEXT: v_mov_b32_e32 v18, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s28
; GFX9-NEXT: v_mov_b32_e32 v14, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB49_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -29515,7 +29732,9 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr36
; GFX9-NEXT: ; implicit-def: $vgpr35
; GFX9-NEXT: ; implicit-def: $vgpr34
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
+; GFX9-NEXT: s_branch .LBB49_3
;
; GFX11-LABEL: bitcast_v12f64_to_v48i16_scalar:
; GFX11: ; %bb.0:
@@ -29530,8 +29749,8 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
@@ -29558,8 +29777,7 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-NEXT: s_cbranch_execnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
@@ -29676,7 +29894,9 @@ define inreg <48 x i16> @bitcast_v12f64_to_v48i16_scalar(<12 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr32
; GFX11-NEXT: ; implicit-def: $vgpr31
; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
+; GFX11-NEXT: s_branch .LBB49_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -30781,6 +31001,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v49, v10
; SI-NEXT: v_mov_b32_e32 v51, v8
; SI-NEXT: v_mov_b32_e32 v52, v6
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v1
; SI-NEXT: s_waitcnt expcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v58, 16, v3
@@ -30802,7 +31023,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v2
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_lshlrev_b32_e32 v34, 16, v4
; SI-NEXT: s_cbranch_scc0 .LBB51_4
@@ -31064,7 +31285,9 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v27, v46
; SI-NEXT: v_mov_b32_e32 v29, v45
; SI-NEXT: v_mov_b32_e32 v34, v43
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v48i16_to_v12f64_scalar:
; VI: ; %bb.0:
@@ -31084,6 +31307,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -31094,7 +31318,7 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -31123,41 +31347,41 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -31180,97 +31404,98 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v49
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v49 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v48
+; VI-NEXT: v_lshlrev_b32_sdwa v3, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v2, v3, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v14, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v2
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_and_b32 s4, s16, 0xffff
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_add_i32 s17, s17, 3
-; VI-NEXT: v_add_u32_e32 v15, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
; VI-NEXT: s_add_i32 s29, s29, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v33
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s7, s7, s18
; VI-NEXT: s_and_b32 s18, s29, 0xffff
; VI-NEXT: s_lshl_b32 s6, s6, 16
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_or_b32 s6, s6, s18
; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v32
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v0, v1, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v32
+; VI-NEXT: s_or_b32 s6, s6, s18
+; VI-NEXT: v_or_b32_sdwa v0, v0, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_add_i32 s4, s4, 0x30000
; VI-NEXT: s_add_i32 s5, s5, 0x30000
; VI-NEXT: s_add_i32 s16, s16, 0x30000
@@ -31285,7 +31510,6 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_add_i32 s8, s8, 0x30000
; VI-NEXT: s_add_i32 s7, s7, 0x30000
; VI-NEXT: s_add_i32 s6, s6, 0x30000
-; VI-NEXT: v_or_b32_sdwa v0, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: v_add_u32_e32 v23, vcc, 0x30000, v0
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
@@ -31305,21 +31529,13 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v48i16_to_v12f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -31335,6 +31551,17 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -31349,7 +31576,6 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -31364,12 +31590,11 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB51_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -31383,8 +31608,10 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -31402,26 +31629,30 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v13, s19
; GFX9-NEXT: s_cbranch_execnz .LBB51_3
; GFX9-NEXT: .LBB51_2: ; %cmp.true
-; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v49
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v48
-; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v39
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v38
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
+; GFX9-NEXT: v_lshl_or_b32 v0, v43, 16, v0
+; GFX9-NEXT: v_pk_add_u16 v14, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
+; GFX9-NEXT: v_lshl_or_b32 v0, v41, 16, v0
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
+; GFX9-NEXT: v_pk_add_u16 v16, v0, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v37
; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v36
; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v35
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
-; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
-; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
+; GFX9-NEXT: v_lshl_or_b32 v1, v42, 16, v1
+; GFX9-NEXT: v_lshl_or_b32 v0, v40, 16, v0
; GFX9-NEXT: v_lshl_or_b32 v18, v55, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v19, v54, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v20, v53, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v23
+; GFX9-NEXT: v_pk_add_u16 v15, v1, 3 op_sel_hi:[1,0]
+; GFX9-NEXT: v_pk_add_u16 v17, v0, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s6, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s7, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v2, s8, 3 op_sel_hi:[1,0]
@@ -31436,10 +31667,6 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: v_pk_add_u16 v11, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v12, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v13, s19, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v14, v14, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v15, v15, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v16, v16, 3 op_sel_hi:[1,0]
-; GFX9-NEXT: v_pk_add_u16 v17, v17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -31455,7 +31682,9 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-TRUE16-LABEL: bitcast_v48i16_to_v12f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -31479,44 +31708,44 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -31525,17 +31754,16 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -31544,21 +31772,21 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -31571,7 +31799,9 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-TRUE16-NEXT: s_branch .LBB51_3
;
; GFX11-FAKE16-LABEL: bitcast_v48i16_to_v12f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -31589,44 +31819,44 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -31635,17 +31865,16 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -31654,21 +31883,21 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s5, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s15, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s0, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
@@ -31681,7 +31910,9 @@ define inreg <12 x double> @bitcast_v48i16_to_v12f64_scalar(<48 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-FAKE16-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32626,6 +32857,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v11
+; SI-NEXT: s_and_b64 s[14:15], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s12, v1
; SI-NEXT: v_readfirstlane_b32 s13, v2
; SI-NEXT: v_readfirstlane_b32 s10, v3
@@ -32635,8 +32867,8 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v7
; SI-NEXT: v_readfirstlane_b32 s7, v8
; SI-NEXT: v_readfirstlane_b32 s4, v9
-; SI-NEXT: s_and_b64 s[14:15], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v10
+; SI-NEXT: s_mov_b64 s[14:15], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
@@ -33050,12 +33282,15 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v12f64_to_v48f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v23, s16
; VI-NEXT: v_mov_b32_e32 v24, s17
; VI-NEXT: v_mov_b32_e32 v19, s18
@@ -33068,9 +33303,9 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v22, s25
; VI-NEXT: v_mov_b32_e32 v17, s26
; VI-NEXT: v_mov_b32_e32 v18, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v13, s28
; VI-NEXT: v_mov_b32_e32 v14, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB53_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -33220,12 +33455,15 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; VI-NEXT: ; implicit-def: $vgpr36
; VI-NEXT: ; implicit-def: $vgpr35
; VI-NEXT: ; implicit-def: $vgpr34
-; VI-NEXT: s_branch .LBB53_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB53_2
+; VI-NEXT: s_branch .LBB53_3
;
; GFX9-LABEL: bitcast_v12f64_to_v48f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v23, s16
; GFX9-NEXT: v_mov_b32_e32 v24, s17
; GFX9-NEXT: v_mov_b32_e32 v19, s18
@@ -33238,9 +33476,9 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v22, s25
; GFX9-NEXT: v_mov_b32_e32 v17, s26
; GFX9-NEXT: v_mov_b32_e32 v18, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v13, s28
; GFX9-NEXT: v_mov_b32_e32 v14, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB53_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v9
@@ -33390,7 +33628,9 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr36
; GFX9-NEXT: ; implicit-def: $vgpr35
; GFX9-NEXT: ; implicit-def: $vgpr34
-; GFX9-NEXT: s_branch .LBB53_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB53_2
+; GFX9-NEXT: s_branch .LBB53_3
;
; GFX11-LABEL: bitcast_v12f64_to_v48f16_scalar:
; GFX11: ; %bb.0:
@@ -33405,8 +33645,8 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v9, s24 :: v_dual_mov_b32 v10, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v5
@@ -33433,8 +33673,7 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v26, 16, v22
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v25
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-NEXT: s_cbranch_execnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
@@ -33551,7 +33790,9 @@ define inreg <48 x half> @bitcast_v12f64_to_v48f16_scalar(<12 x double> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr32
; GFX11-NEXT: ; implicit-def: $vgpr31
; GFX11-NEXT: ; implicit-def: $vgpr30
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB53_2
+; GFX11-NEXT: s_branch .LBB53_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34874,6 +35115,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; SI-NEXT: s_waitcnt vmcnt(9)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v40
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_waitcnt expcnt(2)
@@ -35246,7 +35488,9 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v34, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v31, v40
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v48f16_to_v12f64_scalar:
; VI: ; %bb.0:
@@ -35266,6 +35510,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v9
; VI-NEXT: v_mov_b32_e32 v33, v8
; VI-NEXT: v_mov_b32_e32 v34, v7
@@ -35276,7 +35521,7 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v39, v2
; VI-NEXT: v_mov_b32_e32 v48, v1
; VI-NEXT: v_mov_b32_e32 v49, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB55_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -35305,41 +35550,41 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: s_and_b32 s57, 0xffff, s23
; VI-NEXT: s_lshl_b32 s58, s12, 16
; VI-NEXT: v_or_b32_sdwa v14, v49, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s57, s57, s58
; VI-NEXT: s_and_b32 s58, 0xffff, s24
; VI-NEXT: s_lshl_b32 s59, s11, 16
-; VI-NEXT: v_or_b32_sdwa v15, v48, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s58, s58, s59
; VI-NEXT: s_and_b32 s59, 0xffff, s25
; VI-NEXT: s_lshl_b32 s60, s10, 16
-; VI-NEXT: v_or_b32_sdwa v16, v39, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s59, s59, s60
; VI-NEXT: s_and_b32 s60, 0xffff, s26
; VI-NEXT: s_lshl_b32 s61, s9, 16
-; VI-NEXT: v_or_b32_sdwa v17, v38, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s60, s60, s61
; VI-NEXT: s_and_b32 s61, 0xffff, s27
; VI-NEXT: s_lshl_b32 s62, s8, 16
-; VI-NEXT: v_or_b32_sdwa v18, v37, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s61, s61, s62
; VI-NEXT: s_and_b32 s62, 0xffff, s28
; VI-NEXT: s_lshl_b32 s63, s7, 16
-; VI-NEXT: v_or_b32_sdwa v19, v36, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s62, s62, s63
; VI-NEXT: s_and_b32 s63, 0xffff, s29
; VI-NEXT: s_lshl_b32 s72, s6, 16
-; VI-NEXT: v_or_b32_sdwa v20, v35, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v0, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_or_b32_sdwa v21, v34, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_sdwa v1, v0, v33 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: v_lshlrev_b32_sdwa v0, v0, v32 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_or_b32 s63, s63, s72
+; VI-NEXT: v_or_b32_sdwa v15, v48, v2 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v22, v33, v1 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v23, v32, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_mov_b32_e32 v0, s4
@@ -35360,13 +35605,13 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: .LBB55_2: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v13, 0x200
; VI-NEXT: v_mov_b32_e32 v0, s43
+; VI-NEXT: v_mov_b32_e32 v2, s42
; VI-NEXT: v_add_f16_sdwa v0, v0, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v1, s16, v13
+; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
+; VI-NEXT: v_add_f16_e32 v3, s17, v13
; VI-NEXT: v_or_b32_e32 v0, v1, v0
-; VI-NEXT: v_mov_b32_e32 v1, s42
-; VI-NEXT: v_add_f16_sdwa v1, v1, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
-; VI-NEXT: v_add_f16_e32 v2, s17, v13
-; VI-NEXT: v_or_b32_e32 v1, v2, v1
+; VI-NEXT: v_or_b32_e32 v1, v3, v2
; VI-NEXT: v_mov_b32_e32 v2, s41
; VI-NEXT: v_add_f16_sdwa v2, v2, v13 dst_sel:WORD_1 dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:DWORD
; VI-NEXT: v_add_f16_e32 v3, s18, v13
@@ -35450,21 +35695,13 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB55_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB55_2
+; VI-NEXT: s_branch .LBB55_3
;
; GFX9-LABEL: bitcast_v48f16_to_v12f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v9
-; GFX9-NEXT: v_mov_b32_e32 v33, v8
-; GFX9-NEXT: v_mov_b32_e32 v34, v7
-; GFX9-NEXT: v_mov_b32_e32 v35, v6
-; GFX9-NEXT: v_mov_b32_e32 v36, v5
-; GFX9-NEXT: v_mov_b32_e32 v37, v4
-; GFX9-NEXT: v_mov_b32_e32 v38, v3
-; GFX9-NEXT: v_mov_b32_e32 v39, v2
-; GFX9-NEXT: v_mov_b32_e32 v48, v1
-; GFX9-NEXT: v_mov_b32_e32 v49, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -35480,6 +35717,17 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v9
+; GFX9-NEXT: v_mov_b32_e32 v33, v8
+; GFX9-NEXT: v_mov_b32_e32 v34, v7
+; GFX9-NEXT: v_mov_b32_e32 v35, v6
+; GFX9-NEXT: v_mov_b32_e32 v36, v5
+; GFX9-NEXT: v_mov_b32_e32 v37, v4
+; GFX9-NEXT: v_mov_b32_e32 v38, v3
+; GFX9-NEXT: v_mov_b32_e32 v39, v2
+; GFX9-NEXT: v_mov_b32_e32 v48, v1
+; GFX9-NEXT: v_mov_b32_e32 v49, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -35494,7 +35742,6 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v49
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -35509,12 +35756,11 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB55_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v49
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v0
-; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v48
-; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v39
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v38
@@ -35528,8 +35774,10 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v34
; GFX9-NEXT: v_lshl_or_b32 v21, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v33
+; GFX9-NEXT: v_and_b32_e32 v1, 0xffff, v48
; GFX9-NEXT: v_lshl_or_b32 v22, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v32
+; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v1
; GFX9-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s6
; GFX9-NEXT: v_mov_b32_e32 v1, s7
@@ -35557,9 +35805,9 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v34
; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v33
; GFX9-NEXT: v_and_b32_e32 v23, 0xffff, v32
-; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_mov_b32_e32 v13, 0x200
; GFX9-NEXT: v_lshl_or_b32 v14, v43, 16, v14
+; GFX9-NEXT: s_movk_i32 s4, 0x200
; GFX9-NEXT: v_lshl_or_b32 v15, v42, 16, v15
; GFX9-NEXT: v_lshl_or_b32 v16, v41, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v17, v40, 16, v17
@@ -35602,7 +35850,9 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB55_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB55_2
+; GFX9-NEXT: s_branch .LBB55_3
;
; GFX11-TRUE16-LABEL: bitcast_v48f16_to_v12f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -35626,44 +35876,44 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -35672,17 +35922,16 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -35691,21 +35940,21 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -35718,7 +35967,9 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB55_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-TRUE16-NEXT: s_branch .LBB55_3
;
; GFX11-FAKE16-LABEL: bitcast_v48f16_to_v12f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -35736,44 +35987,44 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v39, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v38, 0xffff, v5
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s1, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s46
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s45
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s17, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s18, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s19, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s20, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s21, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s22, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s23, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s24, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s25, s15
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s26, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
@@ -35782,17 +36033,16 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v21, v34, 16, v48
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s6 :: v_dual_mov_b32 v5, s7
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s8 :: v_dual_mov_b32 v7, s9
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s10 :: v_dual_mov_b32 v9, s11
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v11, s13
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s14 :: v_dual_mov_b32 v13, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-FAKE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v37, 16, v51
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v36, 16, v50
@@ -35801,21 +36051,21 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v22, v33, 16, v39
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v23, v32, 16, v38
; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s5 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s15 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
@@ -35828,7 +36078,9 @@ define inreg <12 x double> @bitcast_v48f16_to_v12f64_scalar(<48 x half> inreg %a
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB55_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB55_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-FAKE16-NEXT: s_branch .LBB55_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37039,6 +37291,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(4)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v31, s22
@@ -37493,10 +37746,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr44
-; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; kill: killed $vgpr44
-; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; implicit-def: $vgpr60
; SI-NEXT: ; implicit-def: $vgpr35
; SI-NEXT: ; implicit-def: $vgpr61
@@ -37524,6 +37774,9 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr43
; SI-NEXT: ; implicit-def: $vgpr59
; SI-NEXT: ; implicit-def: $vgpr56
+; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: ; kill: killed $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr44
; SI-NEXT: ; kill: killed $vgpr44
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
@@ -37557,11 +37810,14 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: s_branch .LBB57_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB57_2
+; SI-NEXT: s_branch .LBB57_3
;
; VI-LABEL: bitcast_v48i16_to_v48f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; VI-NEXT: s_lshr_b32 s6, s29, 16
; VI-NEXT: s_lshr_b32 s7, s28, 16
; VI-NEXT: s_lshr_b32 s8, s27, 16
@@ -37576,7 +37832,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v11, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v8
; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v7
@@ -37586,12 +37842,15 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v10, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_add_i32 s43, s43, 3
; VI-NEXT: s_add_i32 s17, s17, 3
@@ -37640,7 +37899,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; VI-NEXT: v_add_u32_e32 v12, vcc, 3, v12
; VI-NEXT: v_add_u32_e32 v9, vcc, 3, v9
; VI-NEXT: v_add_u32_e32 v11, vcc, 3, v11
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -37718,12 +37977,11 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s7
; VI-NEXT: v_mov_b32_e32 v13, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v48i16_to_v48f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s43, s29, 16
; GFX9-NEXT: s_lshr_b32 s42, s28, 16
; GFX9-NEXT: s_lshr_b32 s41, s27, 16
@@ -37738,7 +37996,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
@@ -37748,12 +38006,15 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s29, s43
; GFX9-NEXT: v_pk_add_u16 v13, s4, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s28, s42
@@ -37837,8 +38098,6 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; GFX9-NEXT: s_branch .LBB57_5
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -37951,7 +38210,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
@@ -37962,14 +38221,17 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -37988,7 +38250,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
@@ -37998,7 +38260,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, v4, 3 op_sel_hi:[1,0]
@@ -38012,7 +38274,7 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s26, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s25, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s24, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s14, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s11, 3 op_sel_hi:[1,0]
@@ -38049,8 +38311,6 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
-; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
@@ -38064,12 +38324,12 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v31, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s43 :: v_dual_mov_b32 v33, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s41 :: v_dual_mov_b32 v35, s40
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s15 :: v_dual_mov_b32 v37, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s14 :: v_dual_mov_b32 v37, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s12 :: v_dual_mov_b32 v39, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s10 :: v_dual_mov_b32 v49, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s8 :: v_dual_mov_b32 v51, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s5 :: v_dual_mov_b32 v53, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s15
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v24
@@ -38147,19 +38407,22 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -38184,10 +38447,10 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, v5, 3 op_sel_hi:[1,0]
@@ -38208,12 +38471,12 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s11, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, s8, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, s0, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v25, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v24, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, s6, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v25
@@ -38239,8 +38502,6 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
; GFX11-FAKE16-NEXT: s_branch .LBB57_5
-; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
@@ -38257,8 +38518,8 @@ define inreg <48 x half> @bitcast_v48i16_to_v48f16_scalar(<48 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s15 :: v_dual_mov_b32 v37, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s13 :: v_dual_mov_b32 v39, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s11 :: v_dual_mov_b32 v49, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s9 :: v_dual_mov_b32 v51, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s6 :: v_dual_mov_b32 v53, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s8 :: v_dual_mov_b32 v51, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s9 :: v_dual_mov_b32 v53, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s5
; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
@@ -39352,10 +39613,14 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v40, v40
; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
; SI-NEXT: v_cvt_f32_f16_e32 v55, v55
@@ -39560,7 +39825,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v50, v11, v27, 16
; SI-NEXT: v_alignbit_b32 v27, v5, v26, 16
; SI-NEXT: v_alignbit_b32 v26, v2, v44, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25
; SI-NEXT: v_lshlrev_b32_e32 v40, 16, v40
; SI-NEXT: v_and_b32_e32 v22, 0xffff, v22
@@ -39706,12 +39971,11 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:36 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v48f16_to_v48i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; VI-NEXT: s_lshr_b32 s6, s29, 16
; VI-NEXT: s_lshr_b32 s7, s28, 16
; VI-NEXT: s_lshr_b32 s8, s27, 16
@@ -39726,7 +39990,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
-; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v22, 16, v8
; VI-NEXT: v_lshrrev_b32_e32 v21, 16, v7
@@ -39736,12 +40000,15 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v34, 0x200
; VI-NEXT: v_add_f16_e32 v30, s16, v34
; VI-NEXT: v_add_f16_e32 v55, s43, v34
@@ -39792,8 +40059,6 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v9, 0x200, v9
; VI-NEXT: v_add_f16_e32 v23, 0x200, v23
; VI-NEXT: s_branch .LBB59_5
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v34, s6
; VI-NEXT: v_mov_b32_e32 v13, s29
@@ -39887,6 +40152,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX9-LABEL: bitcast_v48f16_to_v48i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
; GFX9-NEXT: s_lshr_b32 s43, s29, 16
; GFX9-NEXT: s_lshr_b32 s42, s28, 16
; GFX9-NEXT: s_lshr_b32 s41, s27, 16
@@ -39901,7 +40167,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v10
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
@@ -39911,12 +40177,15 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX9-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX9-NEXT: v_and_b32_e32 v7, 0xffff, v7
@@ -40002,8 +40271,6 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v8
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; GFX9-NEXT: s_branch .LBB59_5
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -40116,7 +40383,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s23, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s22, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s21, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s20, 16
@@ -40127,14 +40394,17 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-TRUE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -40153,7 +40423,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s24, s24, s40
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s23, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s23, s14
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s22, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s21, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s20, s11
@@ -40163,7 +40433,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s15
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, v4 op_sel_hi:[0,1]
@@ -40177,7 +40447,7 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s26 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s25 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s24 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s14 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s11 op_sel_hi:[0,1]
@@ -40214,8 +40484,6 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
-; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
@@ -40229,12 +40497,12 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v30, s45 :: v_dual_mov_b32 v31, s44
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v32, s43 :: v_dual_mov_b32 v33, s42
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v34, s41 :: v_dual_mov_b32 v35, s40
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s15 :: v_dual_mov_b32 v37, s13
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v36, s14 :: v_dual_mov_b32 v37, s13
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s12 :: v_dual_mov_b32 v39, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s10 :: v_dual_mov_b32 v49, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s8 :: v_dual_mov_b32 v51, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s5 :: v_dual_mov_b32 v53, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s15
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
; GFX11-TRUE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
; GFX11-TRUE16-NEXT: v_and_b32_e32 v65, 0xffff, v24
@@ -40312,19 +40580,22 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-FAKE16-NEXT: v_and_b32_e32 v3, 0xffff, v3
@@ -40349,10 +40620,10 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, v5 op_sel_hi:[0,1]
@@ -40373,12 +40644,12 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s11 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, s8 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, s0 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v25, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v24, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, s6 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v55, 16, v27
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v54, 16, v26
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v53, 16, v25
@@ -40404,8 +40675,6 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v22, 16, v4
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v23, 16, v5
; GFX11-FAKE16-NEXT: s_branch .LBB59_5
-; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s29 :: v_dual_mov_b32 v15, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s27 :: v_dual_mov_b32 v17, s26
@@ -40422,8 +40691,8 @@ define inreg <48 x i16> @bitcast_v48f16_to_v48i16_scalar(<48 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v36, s15 :: v_dual_mov_b32 v37, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v38, s13 :: v_dual_mov_b32 v39, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s11 :: v_dual_mov_b32 v49, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s9 :: v_dual_mov_b32 v51, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s6 :: v_dual_mov_b32 v53, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s8 :: v_dual_mov_b32 v51, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s9 :: v_dual_mov_b32 v53, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s4 :: v_dual_mov_b32 v55, s5
; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v27, 0xffff, v27
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
index 97d040b..e5b9a31 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.832bit.ll
@@ -185,6 +185,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -198,7 +199,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -212,10 +213,13 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23
@@ -242,15 +246,14 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v26i32_to_v26f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -264,7 +267,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -278,10 +281,13 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v25, vcc, 3, v25
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
; VI-NEXT: v_add_u32_e32 v23, vcc, 3, v23
@@ -308,15 +314,14 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v26i32_to_v26f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -330,7 +335,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -344,10 +349,13 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
@@ -374,40 +382,38 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v26i32_to_v26f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
@@ -434,6 +440,7 @@ define inreg <26 x float> @bitcast_v26i32_to_v26f32_scalar(<26 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -618,6 +625,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -631,7 +639,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -645,10 +653,13 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v25, 1.0, v25
; SI-NEXT: v_add_f32_e32 v24, 1.0, v24
; SI-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -675,15 +686,14 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v26f32_to_v26i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -697,7 +707,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -711,10 +721,13 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -741,15 +754,14 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v26f32_to_v26i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -763,7 +775,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -777,10 +789,13 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -807,40 +822,38 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v26f32_to_v26i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB3_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: .LBB3_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
@@ -854,6 +867,7 @@ define inreg <26 x i32> @bitcast_v26f32_to_v26i32_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1051,6 +1065,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -1064,7 +1079,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -1078,10 +1093,13 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23
@@ -1108,15 +1126,14 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v26i32_to_v13i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -1130,7 +1147,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -1144,10 +1161,13 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v25, vcc, 3, v25
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
; VI-NEXT: v_add_u32_e32 v23, vcc, 3, v23
@@ -1174,15 +1194,14 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v26i32_to_v13i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -1196,7 +1215,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -1210,10 +1229,13 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
@@ -1240,40 +1262,38 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v26i32_to_v13i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
@@ -1300,6 +1320,7 @@ define inreg <13 x i64> @bitcast_v26i32_to_v13i64_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1504,6 +1525,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -1517,7 +1539,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -1531,10 +1553,13 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
@@ -1561,15 +1586,14 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v13i64_to_v26i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -1583,7 +1607,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -1597,10 +1621,13 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
; VI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
@@ -1627,15 +1654,14 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v13i64_to_v26i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -1649,7 +1675,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -1663,10 +1689,13 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 3, v24
; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v25, vcc
; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 3, v22
@@ -1693,40 +1722,38 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v13i64_to_v26i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB7_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: .LBB7_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
@@ -1760,6 +1787,7 @@ define inreg <26 x i32> @bitcast_v13i64_to_v26i32_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1957,6 +1985,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -1970,7 +1999,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -1984,10 +2013,13 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: v_add_i32_e32 v23, vcc, 3, v23
@@ -2014,15 +2046,14 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v26i32_to_v13f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -2036,7 +2067,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -2050,10 +2081,13 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v25, vcc, 3, v25
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
; VI-NEXT: v_add_u32_e32 v23, vcc, 3, v23
@@ -2080,15 +2114,14 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v26i32_to_v13f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -2102,7 +2135,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -2116,10 +2149,13 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
; GFX9-NEXT: v_add_u32_e32 v24, 3, v24
; GFX9-NEXT: v_add_u32_e32 v23, 3, v23
@@ -2146,40 +2182,38 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v26i32_to_v13f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
; GFX11-NEXT: v_add_nc_u32_e32 v24, 3, v24
; GFX11-NEXT: v_add_nc_u32_e32 v23, 3, v23
@@ -2206,6 +2240,7 @@ define inreg <13 x double> @bitcast_v26i32_to_v13f64_scalar(<26 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2351,6 +2386,7 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -2375,13 +2411,16 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -2395,15 +2434,14 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v13f64_to_v26i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -2428,13 +2466,16 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -2448,15 +2489,14 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v13f64_to_v26i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -2481,13 +2521,16 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -2501,40 +2544,38 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v13f64_to_v26i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB11_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: .LBB11_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -2548,6 +2589,7 @@ define inreg <26 x i32> @bitcast_v13f64_to_v26i32_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3409,6 +3451,7 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s41, v1
; SI-NEXT: v_readfirstlane_b32 s40, v2
; SI-NEXT: v_readfirstlane_b32 s15, v3
@@ -3420,8 +3463,8 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v9
; SI-NEXT: v_readfirstlane_b32 s8, v10
; SI-NEXT: v_readfirstlane_b32 s7, v11
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v12
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -3715,12 +3758,15 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr43
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v26i32_to_v52i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s41, v0
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: v_readfirstlane_b32 s15, v2
@@ -3731,13 +3777,13 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v7
; VI-NEXT: v_readfirstlane_b32 s9, v8
; VI-NEXT: v_readfirstlane_b32 s8, v9
-; VI-NEXT: v_readfirstlane_b32 s6, v10
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v11
+; VI-NEXT: v_readfirstlane_b32 s7, v10
+; VI-NEXT: v_readfirstlane_b32 s6, v11
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -3764,8 +3810,8 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: s_lshr_b32 s91, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -3790,8 +3836,8 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -3889,12 +3935,12 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s40, s44, 16
; VI-NEXT: s_or_b32 s8, s8, s40
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s40, s43, 16
-; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_lshl_b32 s40, s43, 16
; VI-NEXT: s_or_b32 s7, s7, s40
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -3919,8 +3965,8 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v21, s10
; VI-NEXT: v_mov_b32_e32 v22, s9
; VI-NEXT: v_mov_b32_e32 v23, s8
-; VI-NEXT: v_mov_b32_e32 v24, s6
-; VI-NEXT: v_mov_b32_e32 v25, s7
+; VI-NEXT: v_mov_b32_e32 v24, s7
+; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB13_4:
; VI-NEXT: ; implicit-def: $sgpr91
@@ -3949,39 +3995,42 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr44
; VI-NEXT: ; implicit-def: $sgpr43
; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v26i32_to_v52i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s6, v11
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -3998,6 +4047,7 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s41, s41, 3
; GFX9-NEXT: s_add_i32 s40, s40, 3
; GFX9-NEXT: s_add_i32 s15, s15, 3
@@ -4009,7 +4059,6 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -4024,18 +4073,18 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -4065,18 +4114,18 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s42
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -4091,18 +4140,18 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: ; implicit-def: $sgpr91
@@ -4131,7 +4180,9 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr44
; GFX9-NEXT: ; implicit-def: $sgpr43
; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v26i32_to_v52i16_scalar:
; GFX11: ; %bb.0:
@@ -4142,16 +4193,16 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
; GFX11-NEXT: v_readfirstlane_b32 s7, v3
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
-; GFX11-NEXT: v_readfirstlane_b32 s9, v5
+; GFX11-NEXT: v_readfirstlane_b32 s10, v5
; GFX11-NEXT: v_readfirstlane_b32 s11, v6
-; GFX11-NEXT: v_readfirstlane_b32 s10, v7
-; GFX11-NEXT: s_mov_b32 s78, 0
+; GFX11-NEXT: v_readfirstlane_b32 s9, v7
+; GFX11-NEXT: s_mov_b32 s78, -1
; GFX11-NEXT: s_and_b32 s12, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -4175,12 +4226,11 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s75, s2, 16
; GFX11-NEXT: s_lshr_b32 s76, s1, 16
; GFX11-NEXT: s_lshr_b32 s77, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s10, s10, 3
-; GFX11-NEXT: s_add_i32 s11, s11, 3
; GFX11-NEXT: s_add_i32 s9, s9, 3
+; GFX11-NEXT: s_add_i32 s11, s11, 3
+; GFX11-NEXT: s_add_i32 s10, s10, 3
; GFX11-NEXT: s_add_i32 s8, s8, 3
; GFX11-NEXT: s_add_i32 s7, s7, 3
; GFX11-NEXT: s_add_i32 s6, s6, 3
@@ -4204,9 +4254,9 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -4255,9 +4305,9 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s12
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4269,8 +4319,8 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s10
+; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s10
+; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s9
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: ; implicit-def: $sgpr77
@@ -4299,7 +4349,9 @@ define inreg <52 x i16> @bitcast_v26i32_to_v52i16_scalar(<26 x i32> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5511,6 +5563,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v39, v12
; SI-NEXT: v_mov_b32_e32 v48, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3
@@ -5533,7 +5586,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -5827,7 +5880,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v32, v57
; SI-NEXT: v_mov_b32_e32 v57, v63
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v52i16_to_v26i32_scalar:
; VI: ; %bb.0:
@@ -5847,6 +5902,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -5859,7 +5915,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB15_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -5960,79 +6016,79 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_and_b32 s4, s16, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s43, 16
-; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_and_b32 s4, s16, 0xffff
+; VI-NEXT: s_lshl_b32 s5, s43, 16
+; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -6082,23 +6138,13 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v52i16_to_v26i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -6114,6 +6160,19 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -6130,7 +6189,6 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -6145,6 +6203,7 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -6254,7 +6313,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -6284,9 +6345,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -6298,15 +6359,14 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -6318,10 +6378,11 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -6339,10 +6400,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -6367,9 +6427,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -6382,7 +6442,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-TRUE16-NEXT: s_branch .LBB15_3
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -6404,9 +6466,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -6418,15 +6480,14 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -6438,10 +6499,11 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -6459,10 +6521,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -6487,9 +6548,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -6502,7 +6563,9 @@ define inreg <26 x i32> @bitcast_v52i16_to_v26i32_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-FAKE16-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7640,6 +7703,7 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s41, v1
; SI-NEXT: v_readfirstlane_b32 s40, v2
; SI-NEXT: v_readfirstlane_b32 s15, v3
@@ -7648,11 +7712,11 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s12, v6
; SI-NEXT: v_readfirstlane_b32 s11, v7
; SI-NEXT: v_readfirstlane_b32 s10, v8
-; SI-NEXT: v_readfirstlane_b32 s8, v9
-; SI-NEXT: v_readfirstlane_b32 s7, v10
-; SI-NEXT: v_readfirstlane_b32 s6, v11
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v12
+; SI-NEXT: v_readfirstlane_b32 s9, v9
+; SI-NEXT: v_readfirstlane_b32 s8, v10
+; SI-NEXT: v_readfirstlane_b32 s7, v11
+; SI-NEXT: v_readfirstlane_b32 s6, v12
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
@@ -7660,13 +7724,13 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -7715,10 +7779,10 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v44, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -7765,10 +7829,10 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s12, s12, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s10, s10, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s42, s18, 16
@@ -7791,14 +7855,14 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s75, s12, 16
; SI-NEXT: s_lshr_b32 s76, s11, 16
; SI-NEXT: s_lshr_b32 s77, s10, 16
-; SI-NEXT: s_lshr_b32 s78, s8, 16
-; SI-NEXT: s_lshr_b32 s79, s7, 16
-; SI-NEXT: s_lshr_b32 s88, s6, 16
-; SI-NEXT: s_lshr_b32 s89, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: s_lshr_b32 s78, s9, 16
+; SI-NEXT: s_lshr_b32 s79, s8, 16
+; SI-NEXT: s_lshr_b32 s88, s7, 16
+; SI-NEXT: s_lshr_b32 s89, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v13, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -8092,12 +8156,15 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v26i32_to_v52f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s41, v0
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: v_readfirstlane_b32 s15, v2
@@ -8108,13 +8175,13 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v7
; VI-NEXT: v_readfirstlane_b32 s9, v8
; VI-NEXT: v_readfirstlane_b32 s8, v9
-; VI-NEXT: v_readfirstlane_b32 s6, v10
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v11
+; VI-NEXT: v_readfirstlane_b32 s7, v10
+; VI-NEXT: v_readfirstlane_b32 s6, v11
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB17_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -8141,8 +8208,8 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: s_lshr_b32 s91, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -8167,8 +8234,8 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -8266,12 +8333,12 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s40, s44, 16
; VI-NEXT: s_or_b32 s8, s8, s40
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s40, s43, 16
-; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_lshl_b32 s40, s43, 16
; VI-NEXT: s_or_b32 s7, s7, s40
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -8296,8 +8363,8 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v21, s10
; VI-NEXT: v_mov_b32_e32 v22, s9
; VI-NEXT: v_mov_b32_e32 v23, s8
-; VI-NEXT: v_mov_b32_e32 v24, s6
-; VI-NEXT: v_mov_b32_e32 v25, s7
+; VI-NEXT: v_mov_b32_e32 v24, s7
+; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB17_4:
; VI-NEXT: ; implicit-def: $sgpr91
@@ -8326,39 +8393,42 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr44
; VI-NEXT: ; implicit-def: $sgpr43
; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB17_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB17_2
+; VI-NEXT: s_branch .LBB17_3
;
; GFX9-LABEL: bitcast_v26i32_to_v52f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s6, v11
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -8375,6 +8445,7 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s41, s41, 3
; GFX9-NEXT: s_add_i32 s40, s40, 3
; GFX9-NEXT: s_add_i32 s15, s15, 3
@@ -8386,7 +8457,6 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -8401,18 +8471,18 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -8442,18 +8512,18 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s42
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -8468,18 +8538,18 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: ; implicit-def: $sgpr91
@@ -8508,7 +8578,9 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr44
; GFX9-NEXT: ; implicit-def: $sgpr43
; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB17_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB17_2
+; GFX9-NEXT: s_branch .LBB17_3
;
; GFX11-LABEL: bitcast_v26i32_to_v52f16_scalar:
; GFX11: ; %bb.0:
@@ -8519,16 +8591,16 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
; GFX11-NEXT: v_readfirstlane_b32 s7, v3
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
-; GFX11-NEXT: v_readfirstlane_b32 s9, v5
+; GFX11-NEXT: v_readfirstlane_b32 s10, v5
; GFX11-NEXT: v_readfirstlane_b32 s11, v6
-; GFX11-NEXT: v_readfirstlane_b32 s10, v7
-; GFX11-NEXT: s_mov_b32 s78, 0
+; GFX11-NEXT: v_readfirstlane_b32 s9, v7
+; GFX11-NEXT: s_mov_b32 s78, -1
; GFX11-NEXT: s_and_b32 s12, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -8552,12 +8624,11 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s75, s2, 16
; GFX11-NEXT: s_lshr_b32 s76, s1, 16
; GFX11-NEXT: s_lshr_b32 s77, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
+; GFX11-NEXT: s_cbranch_execnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s10, s10, 3
-; GFX11-NEXT: s_add_i32 s11, s11, 3
; GFX11-NEXT: s_add_i32 s9, s9, 3
+; GFX11-NEXT: s_add_i32 s11, s11, 3
+; GFX11-NEXT: s_add_i32 s10, s10, 3
; GFX11-NEXT: s_add_i32 s8, s8, 3
; GFX11-NEXT: s_add_i32 s7, s7, 3
; GFX11-NEXT: s_add_i32 s6, s6, 3
@@ -8581,9 +8652,9 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -8632,9 +8703,9 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s12
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -8646,8 +8717,8 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s10
+; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s10
+; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s9
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: ; implicit-def: $sgpr77
@@ -8676,7 +8747,9 @@ define inreg <52 x half> @bitcast_v26i32_to_v52f16_scalar(<26 x i32> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB17_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
+; GFX11-NEXT: s_cbranch_vccz .LBB17_2
+; GFX11-NEXT: s_branch .LBB17_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10062,15 +10135,15 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v42, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v42, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v10
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_cvt_f16_f32_e32 v52, v12
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
@@ -10088,13 +10161,13 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v27, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v29
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v53, s17
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
+; SI-NEXT: v_cvt_f16_f32_e32 v3, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: v_cvt_f16_f32_e32 v12, s21
; SI-NEXT: v_cvt_f16_f32_e32 v14, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
@@ -10112,6 +10185,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v48
@@ -10137,8 +10211,8 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB19_4
@@ -10149,17 +10223,18 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
+; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
+; SI-NEXT: v_or_b32_e32 v0, v3, v0
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT: v_mov_b32_e32 v39, v54
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; SI-NEXT: v_or_b32_e32 v0, v11, v0
; SI-NEXT: v_or_b32_e32 v2, v14, v2
; SI-NEXT: v_or_b32_e32 v3, v10, v3
; SI-NEXT: v_or_b32_e32 v4, v9, v4
@@ -10167,11 +10242,12 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v6, v7, v6
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v46
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v55
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v39
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v47
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
@@ -10183,10 +10259,10 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v51, v46
; SI-NEXT: v_or_b32_e32 v7, v45, v7
; SI-NEXT: v_or_b32_e32 v8, v40, v8
-; SI-NEXT: v_or_b32_e32 v9, v55, v9
-; SI-NEXT: v_or_b32_e32 v10, v54, v10
-; SI-NEXT: v_or_b32_e32 v11, v47, v11
-; SI-NEXT: v_or_b32_e32 v12, v60, v12
+; SI-NEXT: v_or_b32_e32 v9, v42, v9
+; SI-NEXT: v_or_b32_e32 v10, v57, v10
+; SI-NEXT: v_or_b32_e32 v11, v60, v11
+; SI-NEXT: v_or_b32_e32 v12, v53, v12
; SI-NEXT: v_or_b32_e32 v13, v52, v13
; SI-NEXT: v_or_b32_e32 v14, v63, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
@@ -10214,15 +10290,16 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v54
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v42
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v57
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -10233,8 +10310,8 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v53
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
; SI-NEXT: v_cvt_f32_f16_e32 v16, v63
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
@@ -10334,7 +10411,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
@@ -10372,22 +10449,22 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v55
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v39
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -10524,7 +10601,10 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v30, v58
; SI-NEXT: v_mov_b32_e32 v58, v63
; SI-NEXT: v_mov_b32_e32 v63, v50
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: v_mov_b32_e32 v39, v54
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v52f16_to_v26i32_scalar:
; VI: ; %bb.0:
@@ -10544,6 +10624,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -10556,7 +10637,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB19_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -10740,23 +10821,13 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB19_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB19_2
+; VI-NEXT: s_branch .LBB19_3
;
; GFX9-LABEL: bitcast_v52f16_to_v26i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -10772,6 +10843,19 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -10788,7 +10872,6 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -10803,6 +10886,7 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -10914,7 +10998,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB19_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB19_2
+; GFX9-NEXT: s_branch .LBB19_3
;
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -10944,9 +11030,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -10958,15 +11044,14 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -10978,10 +11063,11 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -10999,10 +11085,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -11027,9 +11112,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -11042,7 +11127,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-TRUE16-NEXT: s_branch .LBB19_3
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -11064,9 +11151,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -11078,15 +11165,14 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -11098,10 +11184,11 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -11119,10 +11206,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -11147,9 +11233,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -11162,7 +11248,9 @@ define inreg <26 x i32> @bitcast_v52f16_to_v26i32_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB19_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB19_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-FAKE16-NEXT: s_branch .LBB19_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11346,6 +11434,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -11359,7 +11448,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -11373,10 +11462,13 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v25, 1.0, v25
; SI-NEXT: v_add_f32_e32 v24, 1.0, v24
; SI-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -11403,15 +11495,14 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v26f32_to_v13i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -11425,7 +11516,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -11439,10 +11530,13 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -11469,15 +11563,14 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v26f32_to_v13i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -11491,7 +11584,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -11505,10 +11598,13 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -11535,40 +11631,38 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v26f32_to_v13i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB21_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: .LBB21_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
@@ -11582,6 +11676,7 @@ define inreg <13 x i64> @bitcast_v26f32_to_v13i64_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11786,6 +11881,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -11799,7 +11895,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -11813,10 +11909,13 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
; SI-NEXT: v_add_i32_e32 v22, vcc, 3, v22
@@ -11843,15 +11942,14 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v13i64_to_v26f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -11865,7 +11963,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -11879,10 +11977,13 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
; VI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
; VI-NEXT: v_add_u32_e32 v22, vcc, 3, v22
@@ -11909,15 +12010,14 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v13i64_to_v26f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -11931,7 +12031,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -11945,10 +12045,13 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 3, v24
; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v25, vcc
; GFX9-NEXT: v_add_co_u32_e32 v22, vcc, 3, v22
@@ -11975,40 +12078,38 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v13i64_to_v26f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB23_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: .LBB23_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
@@ -12042,6 +12143,7 @@ define inreg <26 x float> @bitcast_v13i64_to_v26f32_scalar(<13 x i64> inreg %a,
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12226,6 +12328,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -12239,7 +12342,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -12253,10 +12356,13 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_3
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v25, 1.0, v25
; SI-NEXT: v_add_f32_e32 v24, 1.0, v24
; SI-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -12283,15 +12389,14 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB25_3: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v26f32_to_v13f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -12305,7 +12410,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -12319,10 +12424,13 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -12349,15 +12457,14 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v26f32_to_v13f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -12371,7 +12478,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -12385,10 +12492,13 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
@@ -12415,40 +12525,38 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v26f32_to_v13f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB25_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: .LBB25_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v20, 1.0, v20
@@ -12462,6 +12570,7 @@ define inreg <13 x double> @bitcast_v26f32_to_v13f64_scalar(<26 x float> inreg %
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12607,6 +12716,7 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -12631,13 +12741,16 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -12651,15 +12764,14 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v13f64_to_v26f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -12684,13 +12796,16 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -12704,15 +12819,14 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v13f64_to_v26f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -12737,13 +12851,16 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -12757,40 +12874,38 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v13f64_to_v26f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB27_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: .LBB27_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
@@ -12804,6 +12919,7 @@ define inreg <26 x float> @bitcast_v13f64_to_v26f32_scalar(<13 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13639,6 +13755,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v28, s16
; SI-NEXT: v_mov_b32_e32 v27, s17
; SI-NEXT: v_mov_b32_e32 v25, s18
@@ -13646,7 +13763,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v21, s20
; SI-NEXT: v_mov_b32_e32 v19, s21
; SI-NEXT: v_mov_b32_e32 v22, s22
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v20, s23
; SI-NEXT: v_mov_b32_e32 v18, s24
; SI-NEXT: v_mov_b32_e32 v17, s25
@@ -13941,27 +14058,30 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr34
; SI-NEXT: ; implicit-def: $vgpr23
; SI-NEXT: ; implicit-def: $vgpr32
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v26f32_to_v52i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; VI-NEXT: v_mov_b32_e32 v22, s16
-; VI-NEXT: v_mov_b32_e32 v20, s17
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v21, s16
+; VI-NEXT: v_mov_b32_e32 v19, s17
; VI-NEXT: v_mov_b32_e32 v18, s18
-; VI-NEXT: v_mov_b32_e32 v17, s19
+; VI-NEXT: v_mov_b32_e32 v16, s19
; VI-NEXT: v_mov_b32_e32 v15, s20
; VI-NEXT: v_mov_b32_e32 v14, s21
; VI-NEXT: v_mov_b32_e32 v13, s22
-; VI-NEXT: v_mov_b32_e32 v24, s23
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v23, s23
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v25, s24
-; VI-NEXT: v_mov_b32_e32 v23, s25
-; VI-NEXT: v_mov_b32_e32 v21, s26
-; VI-NEXT: v_mov_b32_e32 v19, s27
+; VI-NEXT: v_mov_b32_e32 v24, s25
+; VI-NEXT: v_mov_b32_e32 v22, s26
+; VI-NEXT: v_mov_b32_e32 v20, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: v_mov_b32_e32 v16, s29
+; VI-NEXT: v_mov_b32_e32 v17, s29
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -13980,20 +14100,20 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
@@ -14008,20 +14128,20 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
-; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
-; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
-; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
-; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
+; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
+; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
+; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
+; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
-; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
+; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
-; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
-; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
+; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
+; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v9
@@ -14034,45 +14154,45 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; VI-NEXT: .LBB29_3: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v36, 16, v36
-; VI-NEXT: v_or_b32_sdwa v36, v22, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v22, 16, v37
-; VI-NEXT: v_or_b32_sdwa v37, v20, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v26
-; VI-NEXT: v_or_b32_sdwa v26, v18, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v36, v21, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v21, 16, v37
+; VI-NEXT: v_or_b32_sdwa v37, v19, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v26
+; VI-NEXT: v_or_b32_sdwa v26, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v27
-; VI-NEXT: v_or_b32_sdwa v27, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v28
-; VI-NEXT: v_or_b32_sdwa v28, v15, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v27, v16, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; VI-NEXT: v_or_b32_sdwa v28, v15, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v29
; VI-NEXT: v_or_b32_sdwa v29, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v30
; VI-NEXT: v_or_b32_sdwa v30, v13, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v31
-; VI-NEXT: v_or_b32_sdwa v31, v24, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v31, v23, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v32
; VI-NEXT: v_or_b32_sdwa v32, v25, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v33
-; VI-NEXT: v_or_b32_sdwa v33, v23, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v33, v24, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v34
-; VI-NEXT: v_or_b32_sdwa v34, v21, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v34, v22, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v35
-; VI-NEXT: v_or_b32_sdwa v35, v19, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v35, v20, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v43
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v41
; VI-NEXT: v_or_b32_sdwa v12, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -14085,9 +14205,9 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v15, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v55
-; VI-NEXT: v_or_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v54
+; VI-NEXT: v_or_b32_sdwa v13, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
; VI-NEXT: v_or_b32_sdwa v18, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -14146,27 +14266,30 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr48
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; implicit-def: $vgpr38
-; VI-NEXT: s_branch .LBB29_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB29_2
+; VI-NEXT: s_branch .LBB29_3
;
; GFX9-LABEL: bitcast_v26f32_to_v52i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; GFX9-NEXT: v_mov_b32_e32 v22, s16
-; GFX9-NEXT: v_mov_b32_e32 v20, s17
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v21, s16
+; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v18, s18
-; GFX9-NEXT: v_mov_b32_e32 v17, s19
+; GFX9-NEXT: v_mov_b32_e32 v16, s19
; GFX9-NEXT: v_mov_b32_e32 v15, s20
; GFX9-NEXT: v_mov_b32_e32 v14, s21
; GFX9-NEXT: v_mov_b32_e32 v13, s22
-; GFX9-NEXT: v_mov_b32_e32 v24, s23
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v23, s23
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v25, s24
-; GFX9-NEXT: v_mov_b32_e32 v23, s25
-; GFX9-NEXT: v_mov_b32_e32 v21, s26
-; GFX9-NEXT: v_mov_b32_e32 v19, s27
+; GFX9-NEXT: v_mov_b32_e32 v24, s25
+; GFX9-NEXT: v_mov_b32_e32 v22, s26
+; GFX9-NEXT: v_mov_b32_e32 v20, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: v_mov_b32_e32 v16, s29
+; GFX9-NEXT: v_mov_b32_e32 v17, s29
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -14185,20 +14308,20 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
@@ -14213,20 +14336,20 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
-; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
-; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
-; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
-; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
+; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
+; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
+; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
+; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
-; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
+; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
-; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
-; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
+; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
+; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v9
@@ -14239,39 +14362,39 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; GFX9-NEXT: .LBB29_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX9-NEXT: v_lshl_or_b32 v30, v30, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v24
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v23
; GFX9-NEXT: v_lshl_or_b32 v31, v31, 16, v13
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v25
; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v23
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v24
; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v21
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v22
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX9-NEXT: v_lshl_or_b32 v34, v34, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v19
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v20
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v14
; GFX9-NEXT: v_lshl_or_b32 v35, v35, 16, v13
; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v16
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v17
; GFX9-NEXT: v_lshl_or_b32 v14, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v15
@@ -14282,27 +14405,27 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v16, v55, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v3
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v17
; GFX9-NEXT: v_lshl_or_b32 v17, v54, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v4
+; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v37, v37, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; GFX9-NEXT: v_lshl_or_b32 v37, v37, 16, v20
+; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v8
-; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v9
; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v0
@@ -14351,7 +14474,9 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr48
; GFX9-NEXT: ; implicit-def: $vgpr39
; GFX9-NEXT: ; implicit-def: $vgpr38
-; GFX9-NEXT: s_branch .LBB29_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB29_2
+; GFX9-NEXT: s_branch .LBB29_3
;
; GFX11-LABEL: bitcast_v26f32_to_v52i16_scalar:
; GFX11: ; %bb.0:
@@ -14362,12 +14487,12 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
-; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
+; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v13, s24
+; GFX11-NEXT: v_dual_mov_b32 v14, s23 :: v_dual_mov_b32 v11, s26
+; GFX11-NEXT: v_dual_mov_b32 v12, s25 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
@@ -14380,11 +14505,11 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
@@ -14396,17 +14521,16 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-NEXT: s_cbranch_execnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
@@ -14422,11 +14546,11 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
@@ -14446,8 +14570,8 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v19
; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -14458,10 +14582,10 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v19
+; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v13
+; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v18
+; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v19
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -14470,7 +14594,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
@@ -14487,7 +14611,7 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
; GFX11-NEXT: v_lshl_or_b32 v9, v66, 16, v10
; GFX11-NEXT: v_lshl_or_b32 v10, v65, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v13
+; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v15
; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
@@ -14524,7 +14648,9 @@ define inreg <52 x i16> @bitcast_v26f32_to_v52i16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr36
; GFX11-NEXT: ; implicit-def: $vgpr35
; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB29_2
+; GFX11-NEXT: s_branch .LBB29_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15736,6 +15862,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v39, v12
; SI-NEXT: v_mov_b32_e32 v48, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3
@@ -15758,7 +15885,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -16052,7 +16179,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v32, v57
; SI-NEXT: v_mov_b32_e32 v57, v63
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v52i16_to_v26f32_scalar:
; VI: ; %bb.0:
@@ -16072,6 +16201,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -16084,7 +16214,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB31_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -16185,79 +16315,79 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_and_b32 s4, s16, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s43, 16
-; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_and_b32 s4, s16, 0xffff
+; VI-NEXT: s_lshl_b32 s5, s43, 16
+; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -16307,23 +16437,13 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB31_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB31_2
+; VI-NEXT: s_branch .LBB31_3
;
; GFX9-LABEL: bitcast_v52i16_to_v26f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -16339,6 +16459,19 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -16355,7 +16488,6 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -16370,6 +16502,7 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -16479,7 +16612,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB31_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB31_2
+; GFX9-NEXT: s_branch .LBB31_3
;
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v26f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -16509,9 +16644,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -16523,15 +16658,14 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -16543,10 +16677,11 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -16564,10 +16699,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -16592,9 +16726,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -16607,7 +16741,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB31_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-TRUE16-NEXT: s_branch .LBB31_3
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v26f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -16629,9 +16765,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -16643,15 +16779,14 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -16663,10 +16798,11 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -16684,10 +16820,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-FAKE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -16712,9 +16847,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -16727,7 +16862,9 @@ define inreg <26 x float> @bitcast_v52i16_to_v26f32_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB31_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB31_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-FAKE16-NEXT: s_branch .LBB31_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17839,6 +17976,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s41, v1
; SI-NEXT: v_readfirstlane_b32 s40, v2
; SI-NEXT: v_readfirstlane_b32 s15, v3
@@ -17847,11 +17985,11 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v6
; SI-NEXT: v_readfirstlane_b32 s11, v7
; SI-NEXT: v_readfirstlane_b32 s10, v8
-; SI-NEXT: v_readfirstlane_b32 s8, v9
-; SI-NEXT: v_readfirstlane_b32 s7, v10
-; SI-NEXT: v_readfirstlane_b32 s6, v11
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v12
+; SI-NEXT: v_readfirstlane_b32 s9, v9
+; SI-NEXT: v_readfirstlane_b32 s8, v10
+; SI-NEXT: v_readfirstlane_b32 s7, v11
+; SI-NEXT: v_readfirstlane_b32 s6, v12
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -17870,13 +18008,13 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -17923,10 +18061,10 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v42, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v44, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v45, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v32, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v34, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v36, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v45, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v32, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v34, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v36, s9
; SI-NEXT: v_cvt_f32_f16_e32 v38, s10
; SI-NEXT: v_cvt_f32_f16_e32 v48, s11
; SI-NEXT: v_cvt_f32_f16_e32 v15, s12
@@ -17973,10 +18111,10 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v15, s12, 1.0
; SI-NEXT: v_add_f32_e64 v13, s11, 1.0
; SI-NEXT: v_add_f32_e64 v12, s10, 1.0
-; SI-NEXT: v_add_f32_e64 v10, s8, 1.0
-; SI-NEXT: v_add_f32_e64 v8, s7, 1.0
-; SI-NEXT: v_add_f32_e64 v6, s6, 1.0
-; SI-NEXT: v_add_f32_e64 v29, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v10, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v8, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v6, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v29, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v1
; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v40, 16, v3
@@ -18314,27 +18452,30 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr45
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v26f32_to_v52f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; VI-NEXT: v_mov_b32_e32 v22, s16
-; VI-NEXT: v_mov_b32_e32 v20, s17
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v21, s16
+; VI-NEXT: v_mov_b32_e32 v19, s17
; VI-NEXT: v_mov_b32_e32 v18, s18
-; VI-NEXT: v_mov_b32_e32 v17, s19
+; VI-NEXT: v_mov_b32_e32 v16, s19
; VI-NEXT: v_mov_b32_e32 v15, s20
; VI-NEXT: v_mov_b32_e32 v14, s21
; VI-NEXT: v_mov_b32_e32 v13, s22
-; VI-NEXT: v_mov_b32_e32 v24, s23
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v23, s23
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v25, s24
-; VI-NEXT: v_mov_b32_e32 v23, s25
-; VI-NEXT: v_mov_b32_e32 v21, s26
-; VI-NEXT: v_mov_b32_e32 v19, s27
+; VI-NEXT: v_mov_b32_e32 v24, s25
+; VI-NEXT: v_mov_b32_e32 v22, s26
+; VI-NEXT: v_mov_b32_e32 v20, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
-; VI-NEXT: v_mov_b32_e32 v16, s29
+; VI-NEXT: v_mov_b32_e32 v17, s29
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -18353,20 +18494,20 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v11, 1.0, v11
@@ -18381,20 +18522,20 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
+; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v12, 1.0, v12
-; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
-; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
-; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
-; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
+; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
+; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
+; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
+; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
-; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
+; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
-; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
-; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
+; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
+; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v9
@@ -18407,45 +18548,45 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; VI-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; VI-NEXT: .LBB33_3: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v36, 16, v36
-; VI-NEXT: v_or_b32_sdwa v36, v22, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v22, 16, v37
-; VI-NEXT: v_or_b32_sdwa v37, v20, v22 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v26
-; VI-NEXT: v_or_b32_sdwa v26, v18, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v36, v21, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v21, 16, v37
+; VI-NEXT: v_or_b32_sdwa v37, v19, v21 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v26
+; VI-NEXT: v_or_b32_sdwa v26, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v27
-; VI-NEXT: v_or_b32_sdwa v27, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v28
-; VI-NEXT: v_or_b32_sdwa v28, v15, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v27, v16, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v28
+; VI-NEXT: v_or_b32_sdwa v28, v15, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v29
; VI-NEXT: v_or_b32_sdwa v29, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v30
; VI-NEXT: v_or_b32_sdwa v30, v13, v14 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v31
-; VI-NEXT: v_or_b32_sdwa v31, v24, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v31, v23, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v32
; VI-NEXT: v_or_b32_sdwa v32, v25, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v33
-; VI-NEXT: v_or_b32_sdwa v33, v23, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v33, v24, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v34
-; VI-NEXT: v_or_b32_sdwa v34, v21, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v34, v22, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v35
-; VI-NEXT: v_or_b32_sdwa v35, v19, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v35, v20, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v13, 16, v43
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v41
; VI-NEXT: v_or_b32_sdwa v12, v12, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -18458,9 +18599,9 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
; VI-NEXT: v_or_b32_sdwa v15, v1, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v55
-; VI-NEXT: v_or_b32_sdwa v13, v16, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v16, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v54
+; VI-NEXT: v_or_b32_sdwa v13, v17, v13 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_or_b32_sdwa v17, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
; VI-NEXT: v_or_b32_sdwa v18, v4, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
@@ -18519,27 +18660,30 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr48
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; implicit-def: $vgpr38
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
+; VI-NEXT: s_branch .LBB33_3
;
; GFX9-LABEL: bitcast_v26f32_to_v52f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; GFX9-NEXT: v_mov_b32_e32 v22, s16
-; GFX9-NEXT: v_mov_b32_e32 v20, s17
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v21, s16
+; GFX9-NEXT: v_mov_b32_e32 v19, s17
; GFX9-NEXT: v_mov_b32_e32 v18, s18
-; GFX9-NEXT: v_mov_b32_e32 v17, s19
+; GFX9-NEXT: v_mov_b32_e32 v16, s19
; GFX9-NEXT: v_mov_b32_e32 v15, s20
; GFX9-NEXT: v_mov_b32_e32 v14, s21
; GFX9-NEXT: v_mov_b32_e32 v13, s22
-; GFX9-NEXT: v_mov_b32_e32 v24, s23
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v23, s23
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v25, s24
-; GFX9-NEXT: v_mov_b32_e32 v23, s25
-; GFX9-NEXT: v_mov_b32_e32 v21, s26
-; GFX9-NEXT: v_mov_b32_e32 v19, s27
+; GFX9-NEXT: v_mov_b32_e32 v24, s25
+; GFX9-NEXT: v_mov_b32_e32 v22, s26
+; GFX9-NEXT: v_mov_b32_e32 v20, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
-; GFX9-NEXT: v_mov_b32_e32 v16, s29
+; GFX9-NEXT: v_mov_b32_e32 v17, s29
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -18558,20 +18702,20 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v11, 1.0, v11
@@ -18586,20 +18730,20 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
+; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v12, 1.0, v12
-; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
-; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
-; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
-; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
+; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
+; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
+; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
+; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
-; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
+; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
-; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
-; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
+; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
+; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v9
@@ -18612,39 +18756,39 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v40, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v0
-; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v16
+; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v12
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v19
-; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v21
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v23
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v25
-; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v24
+; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v15
-; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v17
+; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v20
-; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v22
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v21
; GFX9-NEXT: .LBB33_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX9-NEXT: v_lshl_or_b32 v30, v30, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v24
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v23
; GFX9-NEXT: v_lshl_or_b32 v31, v31, 16, v13
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v25
; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v23
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v24
; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v21
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v22
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX9-NEXT: v_lshl_or_b32 v34, v34, 16, v13
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v19
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v20
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v14
; GFX9-NEXT: v_lshl_or_b32 v35, v35, 16, v13
; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
-; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v16
+; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v17
; GFX9-NEXT: v_lshl_or_b32 v14, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v15
@@ -18655,27 +18799,27 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Reload
; GFX9-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Reload
+; GFX9-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v2
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v16
; GFX9-NEXT: v_lshl_or_b32 v16, v55, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v3
; GFX9-NEXT: v_and_b32_e32 v18, 0xffff, v18
-; GFX9-NEXT: v_lshl_or_b32 v27, v27, 16, v17
; GFX9-NEXT: v_lshl_or_b32 v17, v54, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v4
+; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX9-NEXT: v_lshl_or_b32 v26, v26, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v18, v53, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v37, v37, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v19, v52, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; GFX9-NEXT: v_lshl_or_b32 v37, v37, 16, v20
+; GFX9-NEXT: v_and_b32_e32 v21, 0xffff, v21
; GFX9-NEXT: v_lshl_or_b32 v20, v51, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
-; GFX9-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v21
; GFX9-NEXT: v_lshl_or_b32 v21, v50, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v8
-; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v22
; GFX9-NEXT: v_lshl_or_b32 v22, v49, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v9
; GFX9-NEXT: v_lshl_or_b32 v23, v48, 16, v0
@@ -18724,7 +18868,9 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr48
; GFX9-NEXT: ; implicit-def: $vgpr39
; GFX9-NEXT: ; implicit-def: $vgpr38
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
+; GFX9-NEXT: s_branch .LBB33_3
;
; GFX11-LABEL: bitcast_v26f32_to_v52f16_scalar:
; GFX11: ; %bb.0:
@@ -18735,12 +18881,12 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s16 :: v_dual_mov_b32 v21, s17
; GFX11-NEXT: v_dual_mov_b32 v20, s18 :: v_dual_mov_b32 v19, s19
; GFX11-NEXT: v_dual_mov_b32 v18, s20 :: v_dual_mov_b32 v9, s22
-; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v15, s23
-; GFX11-NEXT: v_dual_mov_b32 v14, s24 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s26 :: v_dual_mov_b32 v11, s27
+; GFX11-NEXT: v_dual_mov_b32 v10, s21 :: v_dual_mov_b32 v13, s24
+; GFX11-NEXT: v_dual_mov_b32 v14, s23 :: v_dual_mov_b32 v11, s26
+; GFX11-NEXT: v_dual_mov_b32 v12, s25 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
@@ -18753,11 +18899,11 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
@@ -18769,17 +18915,16 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-NEXT: s_cbranch_execnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v12, 1.0, v12
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v10, 1.0, v10
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
@@ -18795,11 +18940,11 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v49, 16, v0
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v16
; GFX11-NEXT: v_lshrrev_b32_e32 v51, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v11
-; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v12
-; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v52, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v11
+; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v12
+; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v9
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v10
; GFX11-NEXT: v_lshrrev_b32_e32 v8, 16, v18
@@ -18819,8 +18964,8 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v19
; GFX11-NEXT: v_lshl_or_b32 v8, v8, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v12
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v11
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
@@ -18831,10 +18976,10 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v21
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
-; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v14
-; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v19
+; GFX11-NEXT: v_lshl_or_b32 v11, v64, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v12, v55, 16, v13
+; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v18
+; GFX11-NEXT: v_lshl_or_b32 v14, v53, 16, v19
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
@@ -18843,7 +18988,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v4
; GFX11-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v9
-; GFX11-NEXT: v_and_b32_e32 v13, 0xffff, v13
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_lshl_or_b32 v16, v51, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v17, v50, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v49, 16, v0
@@ -18860,7 +19005,7 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v26, v67, 16, v24
; GFX11-NEXT: v_lshl_or_b32 v9, v66, 16, v10
; GFX11-NEXT: v_lshl_or_b32 v10, v65, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v13, v54, 16, v13
+; GFX11-NEXT: v_lshl_or_b32 v15, v52, 16, v15
; GFX11-NEXT: v_lshl_or_b32 v21, v38, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v23, v36, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v24, v35, 16, v3
@@ -18897,7 +19042,9 @@ define inreg <52 x half> @bitcast_v26f32_to_v52f16_scalar(<26 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr36
; GFX11-NEXT: ; implicit-def: $vgpr35
; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
+; GFX11-NEXT: s_branch .LBB33_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20283,15 +20430,15 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v42, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v42, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v10
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_cvt_f16_f32_e32 v52, v12
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
@@ -20309,13 +20456,13 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v27, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v29
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v53, s17
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
+; SI-NEXT: v_cvt_f16_f32_e32 v3, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: v_cvt_f16_f32_e32 v12, s21
; SI-NEXT: v_cvt_f16_f32_e32 v14, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
@@ -20333,6 +20480,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v48
@@ -20358,8 +20506,8 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB35_4
@@ -20370,17 +20518,18 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
+; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
+; SI-NEXT: v_or_b32_e32 v0, v3, v0
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT: v_mov_b32_e32 v39, v54
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; SI-NEXT: v_or_b32_e32 v0, v11, v0
; SI-NEXT: v_or_b32_e32 v2, v14, v2
; SI-NEXT: v_or_b32_e32 v3, v10, v3
; SI-NEXT: v_or_b32_e32 v4, v9, v4
@@ -20388,11 +20537,12 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v6, v7, v6
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v46
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v55
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v39
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v47
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
@@ -20404,10 +20554,10 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v51, v46
; SI-NEXT: v_or_b32_e32 v7, v45, v7
; SI-NEXT: v_or_b32_e32 v8, v40, v8
-; SI-NEXT: v_or_b32_e32 v9, v55, v9
-; SI-NEXT: v_or_b32_e32 v10, v54, v10
-; SI-NEXT: v_or_b32_e32 v11, v47, v11
-; SI-NEXT: v_or_b32_e32 v12, v60, v12
+; SI-NEXT: v_or_b32_e32 v9, v42, v9
+; SI-NEXT: v_or_b32_e32 v10, v57, v10
+; SI-NEXT: v_or_b32_e32 v11, v60, v11
+; SI-NEXT: v_or_b32_e32 v12, v53, v12
; SI-NEXT: v_or_b32_e32 v13, v52, v13
; SI-NEXT: v_or_b32_e32 v14, v63, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
@@ -20435,15 +20585,16 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v54
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v42
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v57
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -20454,8 +20605,8 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v53
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
; SI-NEXT: v_cvt_f32_f16_e32 v16, v63
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
@@ -20555,7 +20706,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
@@ -20593,22 +20744,22 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v55
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v39
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -20745,7 +20896,10 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; SI-NEXT: v_mov_b32_e32 v30, v58
; SI-NEXT: v_mov_b32_e32 v58, v63
; SI-NEXT: v_mov_b32_e32 v63, v50
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: v_mov_b32_e32 v39, v54
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v52f16_to_v26f32_scalar:
; VI: ; %bb.0:
@@ -20765,6 +20919,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -20777,7 +20932,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB35_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -20961,23 +21116,13 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v52f16_to_v26f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -20993,6 +21138,19 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -21009,7 +21167,6 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -21024,6 +21181,7 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -21135,7 +21293,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v26f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -21165,9 +21325,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -21179,15 +21339,14 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -21199,10 +21358,11 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -21220,10 +21380,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -21248,9 +21407,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -21263,7 +21422,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-TRUE16-NEXT: s_branch .LBB35_3
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v26f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -21285,9 +21446,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -21299,15 +21460,14 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -21319,10 +21479,11 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -21340,10 +21501,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -21368,9 +21528,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -21383,7 +21543,9 @@ define inreg <26 x float> @bitcast_v52f16_to_v26f32_scalar(<52 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB35_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-FAKE16-NEXT: s_branch .LBB35_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21587,6 +21749,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -21600,7 +21763,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -21614,10 +21777,13 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -21644,15 +21810,14 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
; SI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v13i64_to_v13f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -21666,7 +21831,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -21680,10 +21845,13 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -21710,15 +21878,14 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v23, vcc, 0, v23, vcc
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
; VI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v13i64_to_v13f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -21732,7 +21899,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -21746,10 +21913,13 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -21776,40 +21946,38 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v23, vcc, 0, v23, vcc
; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 3, v24
; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v25, vcc
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v13i64_to_v13f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB37_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: .LBB37_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -21843,6 +22011,7 @@ define inreg <13 x double> @bitcast_v13i64_to_v13f64_scalar(<13 x i64> inreg %a,
; GFX11-NEXT: v_add_co_u32 v24, vcc_lo, v24, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21988,6 +22157,7 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, v11
; SI-NEXT: v_mov_b32_e32 v24, v10
; SI-NEXT: v_mov_b32_e32 v23, v9
@@ -22012,13 +22182,16 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -22032,15 +22205,14 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; SI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v13f64_to_v13i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v25, v11
; VI-NEXT: v_mov_b32_e32 v24, v10
; VI-NEXT: v_mov_b32_e32 v23, v9
@@ -22065,13 +22237,16 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -22085,15 +22260,14 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; VI-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v13f64_to_v13i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v25, v11
; GFX9-NEXT: v_mov_b32_e32 v24, v10
; GFX9-NEXT: v_mov_b32_e32 v23, v9
@@ -22118,13 +22292,16 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -22138,40 +22315,38 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v13f64_to_v13i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
+; GFX11-NEXT: v_dual_mov_b32 v15, v8 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB39_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: .LBB39_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -22185,6 +22360,7 @@ define inreg <13 x i64> @bitcast_v13f64_to_v13i64_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[20:21], v[20:21], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23060,6 +23236,7 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s41, v1
; SI-NEXT: v_readfirstlane_b32 s40, v2
; SI-NEXT: v_readfirstlane_b32 s15, v3
@@ -23071,8 +23248,8 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v9
; SI-NEXT: v_readfirstlane_b32 s8, v10
; SI-NEXT: v_readfirstlane_b32 s7, v11
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v12
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -23366,12 +23543,15 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr43
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr42
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v13i64_to_v52i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s41, v0
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: v_readfirstlane_b32 s15, v2
@@ -23382,13 +23562,13 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v7
; VI-NEXT: v_readfirstlane_b32 s9, v8
; VI-NEXT: v_readfirstlane_b32 s8, v9
-; VI-NEXT: v_readfirstlane_b32 s6, v10
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v11
+; VI-NEXT: v_readfirstlane_b32 s7, v10
+; VI-NEXT: v_readfirstlane_b32 s6, v11
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -23415,8 +23595,8 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: s_lshr_b32 s91, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -23441,8 +23621,8 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -23540,12 +23720,12 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s40, s44, 16
; VI-NEXT: s_or_b32 s8, s8, s40
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s40, s43, 16
-; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_lshl_b32 s40, s43, 16
; VI-NEXT: s_or_b32 s7, s7, s40
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -23570,8 +23750,8 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v21, s10
; VI-NEXT: v_mov_b32_e32 v22, s9
; VI-NEXT: v_mov_b32_e32 v23, s8
-; VI-NEXT: v_mov_b32_e32 v24, s6
-; VI-NEXT: v_mov_b32_e32 v25, s7
+; VI-NEXT: v_mov_b32_e32 v24, s7
+; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_4:
; VI-NEXT: ; implicit-def: $sgpr91
@@ -23600,39 +23780,42 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr44
; VI-NEXT: ; implicit-def: $sgpr43
; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v13i64_to_v52i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s6, v11
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -23649,18 +23832,18 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s40, s40, 3
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s41, s41, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s40, s40, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -23675,18 +23858,18 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -23716,18 +23899,18 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s42
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -23742,18 +23925,18 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: ; implicit-def: $sgpr91
@@ -23782,7 +23965,9 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr44
; GFX9-NEXT: ; implicit-def: $sgpr43
; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v13i64_to_v52i16_scalar:
; GFX11: ; %bb.0:
@@ -23793,16 +23978,16 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
; GFX11-NEXT: v_readfirstlane_b32 s7, v3
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
-; GFX11-NEXT: v_readfirstlane_b32 s9, v5
+; GFX11-NEXT: v_readfirstlane_b32 s10, v5
; GFX11-NEXT: v_readfirstlane_b32 s11, v6
-; GFX11-NEXT: v_readfirstlane_b32 s10, v7
-; GFX11-NEXT: s_mov_b32 s78, 0
+; GFX11-NEXT: v_readfirstlane_b32 s9, v7
+; GFX11-NEXT: s_mov_b32 s78, -1
; GFX11-NEXT: s_and_b32 s12, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -23826,13 +24011,12 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s75, s2, 16
; GFX11-NEXT: s_lshr_b32 s76, s1, 16
; GFX11-NEXT: s_lshr_b32 s77, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s11, s11, 3
-; GFX11-NEXT: s_addc_u32 s10, s10, 0
-; GFX11-NEXT: s_add_u32 s8, s8, 3
; GFX11-NEXT: s_addc_u32 s9, s9, 0
+; GFX11-NEXT: s_add_u32 s8, s8, 3
+; GFX11-NEXT: s_addc_u32 s10, s10, 0
; GFX11-NEXT: s_add_u32 s6, s6, 3
; GFX11-NEXT: s_addc_u32 s7, s7, 0
; GFX11-NEXT: s_add_u32 s4, s4, 3
@@ -23855,9 +24039,9 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -23906,9 +24090,9 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s12
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -23920,8 +24104,8 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s10
+; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s10
+; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s9
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: ; implicit-def: $sgpr77
@@ -23950,7 +24134,9 @@ define inreg <52 x i16> @bitcast_v13i64_to_v52i16_scalar(<13 x i64> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25162,6 +25348,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v39, v12
; SI-NEXT: v_mov_b32_e32 v48, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3
@@ -25184,7 +25371,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -25478,7 +25665,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v32, v57
; SI-NEXT: v_mov_b32_e32 v57, v63
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v52i16_to_v13i64_scalar:
; VI: ; %bb.0:
@@ -25498,6 +25687,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -25510,7 +25700,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -25611,79 +25801,79 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_and_b32 s4, s16, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s43, 16
-; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_and_b32 s4, s16, 0xffff
+; VI-NEXT: s_lshl_b32 s5, s43, 16
+; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -25733,23 +25923,13 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v52i16_to_v13i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -25765,6 +25945,19 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -25781,7 +25974,6 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -25796,6 +25988,7 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -25905,7 +26098,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -25935,9 +26130,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -25949,15 +26144,14 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -25969,10 +26163,11 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -25990,10 +26185,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -26018,9 +26212,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -26033,7 +26227,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-TRUE16-NEXT: s_branch .LBB43_3
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -26055,9 +26251,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -26069,15 +26265,14 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -26089,10 +26284,11 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -26110,10 +26306,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -26138,9 +26333,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -26153,7 +26348,9 @@ define inreg <13 x i64> @bitcast_v52i16_to_v13i64_scalar(<52 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB43_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB43_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-FAKE16-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27306,6 +27503,7 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: v_readfirstlane_b32 s41, v2
; SI-NEXT: v_readfirstlane_b32 s14, v3
@@ -27314,11 +27512,11 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s13, v6
; SI-NEXT: v_readfirstlane_b32 s10, v7
; SI-NEXT: v_readfirstlane_b32 s11, v8
-; SI-NEXT: v_readfirstlane_b32 s7, v9
-; SI-NEXT: v_readfirstlane_b32 s8, v10
+; SI-NEXT: v_readfirstlane_b32 s8, v9
+; SI-NEXT: v_readfirstlane_b32 s9, v10
; SI-NEXT: v_readfirstlane_b32 s6, v11
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v12
+; SI-NEXT: v_readfirstlane_b32 s7, v12
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
@@ -27326,13 +27524,13 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI-NEXT: buffer_store_dword v44, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s11, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -27381,10 +27579,10 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v44, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s13
@@ -27453,18 +27651,18 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI-NEXT: s_addc_u32 s11, s11, 0
; SI-NEXT: s_lshr_b32 s76, s10, 16
; SI-NEXT: s_lshr_b32 s77, s11, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s78, s7, 16
-; SI-NEXT: s_lshr_b32 s79, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: s_lshr_b32 s78, s8, 16
+; SI-NEXT: s_lshr_b32 s79, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: s_lshr_b32 s88, s6, 16
-; SI-NEXT: s_lshr_b32 s89, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: s_lshr_b32 s89, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v13, s10
; SI-NEXT: v_cvt_f32_f16_e32 v15, s13
@@ -27758,12 +27956,15 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v13i64_to_v52f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_readfirstlane_b32 s41, v0
; VI-NEXT: v_readfirstlane_b32 s40, v1
; VI-NEXT: v_readfirstlane_b32 s15, v2
@@ -27774,13 +27975,13 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v7
; VI-NEXT: v_readfirstlane_b32 s9, v8
; VI-NEXT: v_readfirstlane_b32 s8, v9
-; VI-NEXT: v_readfirstlane_b32 s6, v10
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v11
+; VI-NEXT: v_readfirstlane_b32 s7, v10
+; VI-NEXT: v_readfirstlane_b32 s6, v11
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -27807,8 +28008,8 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: s_lshr_b32 s91, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -27833,8 +28034,8 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s42, s7, 16
-; VI-NEXT: s_lshr_b32 s43, s6, 16
+; VI-NEXT: s_lshr_b32 s42, s6, 16
+; VI-NEXT: s_lshr_b32 s43, s7, 16
; VI-NEXT: s_lshr_b32 s44, s8, 16
; VI-NEXT: s_lshr_b32 s45, s9, 16
; VI-NEXT: s_lshr_b32 s46, s10, 16
@@ -27932,12 +28133,12 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s40, s44, 16
; VI-NEXT: s_or_b32 s8, s8, s40
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s40, s43, 16
-; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_lshl_b32 s40, s43, 16
; VI-NEXT: s_or_b32 s7, s7, s40
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s40, s42, 16
+; VI-NEXT: s_or_b32 s6, s6, s40
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -27962,8 +28163,8 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v21, s10
; VI-NEXT: v_mov_b32_e32 v22, s9
; VI-NEXT: v_mov_b32_e32 v23, s8
-; VI-NEXT: v_mov_b32_e32 v24, s6
-; VI-NEXT: v_mov_b32_e32 v25, s7
+; VI-NEXT: v_mov_b32_e32 v24, s7
+; VI-NEXT: v_mov_b32_e32 v25, s6
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_4:
; VI-NEXT: ; implicit-def: $sgpr91
@@ -27992,39 +28193,42 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr44
; VI-NEXT: ; implicit-def: $sgpr43
; VI-NEXT: ; implicit-def: $sgpr42
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v13i64_to_v52f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s6, v11
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -28041,18 +28245,18 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s91, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s40, s40, 3
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s41, s41, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s40, s40, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -28067,18 +28271,18 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s42, s41, 16
-; GFX9-NEXT: s_lshr_b32 s43, s40, 16
-; GFX9-NEXT: s_lshr_b32 s44, s15, 16
-; GFX9-NEXT: s_lshr_b32 s45, s14, 16
-; GFX9-NEXT: s_lshr_b32 s46, s13, 16
-; GFX9-NEXT: s_lshr_b32 s47, s12, 16
-; GFX9-NEXT: s_lshr_b32 s56, s11, 16
-; GFX9-NEXT: s_lshr_b32 s57, s10, 16
-; GFX9-NEXT: s_lshr_b32 s58, s9, 16
-; GFX9-NEXT: s_lshr_b32 s59, s8, 16
-; GFX9-NEXT: s_lshr_b32 s60, s7, 16
-; GFX9-NEXT: s_lshr_b32 s61, s6, 16
+; GFX9-NEXT: s_lshr_b32 s42, s6, 16
+; GFX9-NEXT: s_lshr_b32 s43, s41, 16
+; GFX9-NEXT: s_lshr_b32 s44, s40, 16
+; GFX9-NEXT: s_lshr_b32 s45, s15, 16
+; GFX9-NEXT: s_lshr_b32 s46, s14, 16
+; GFX9-NEXT: s_lshr_b32 s47, s13, 16
+; GFX9-NEXT: s_lshr_b32 s56, s12, 16
+; GFX9-NEXT: s_lshr_b32 s57, s11, 16
+; GFX9-NEXT: s_lshr_b32 s58, s10, 16
+; GFX9-NEXT: s_lshr_b32 s59, s9, 16
+; GFX9-NEXT: s_lshr_b32 s60, s8, 16
+; GFX9-NEXT: s_lshr_b32 s61, s7, 16
; GFX9-NEXT: s_lshr_b32 s62, s29, 16
; GFX9-NEXT: s_lshr_b32 s63, s28, 16
; GFX9-NEXT: s_lshr_b32 s72, s27, 16
@@ -28108,18 +28312,18 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s72
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s63
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s44
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s43
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s42
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s43
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s42
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -28134,18 +28338,18 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: ; implicit-def: $sgpr91
@@ -28174,7 +28378,9 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr44
; GFX9-NEXT: ; implicit-def: $sgpr43
; GFX9-NEXT: ; implicit-def: $sgpr42
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v13i64_to_v52f16_scalar:
; GFX11: ; %bb.0:
@@ -28185,16 +28391,16 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s6, v2
; GFX11-NEXT: v_readfirstlane_b32 s7, v3
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
-; GFX11-NEXT: v_readfirstlane_b32 s9, v5
+; GFX11-NEXT: v_readfirstlane_b32 s10, v5
; GFX11-NEXT: v_readfirstlane_b32 s11, v6
-; GFX11-NEXT: v_readfirstlane_b32 s10, v7
-; GFX11-NEXT: s_mov_b32 s78, 0
+; GFX11-NEXT: v_readfirstlane_b32 s9, v7
+; GFX11-NEXT: s_mov_b32 s78, -1
; GFX11-NEXT: s_and_b32 s12, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -28218,13 +28424,12 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s75, s2, 16
; GFX11-NEXT: s_lshr_b32 s76, s1, 16
; GFX11-NEXT: s_lshr_b32 s77, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s11, s11, 3
-; GFX11-NEXT: s_addc_u32 s10, s10, 0
-; GFX11-NEXT: s_add_u32 s8, s8, 3
; GFX11-NEXT: s_addc_u32 s9, s9, 0
+; GFX11-NEXT: s_add_u32 s8, s8, 3
+; GFX11-NEXT: s_addc_u32 s10, s10, 0
; GFX11-NEXT: s_add_u32 s6, s6, 3
; GFX11-NEXT: s_addc_u32 s7, s7, 0
; GFX11-NEXT: s_add_u32 s4, s4, 3
@@ -28247,9 +28452,9 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s12, s10, 16
+; GFX11-NEXT: s_lshr_b32 s12, s9, 16
; GFX11-NEXT: s_lshr_b32 s13, s11, 16
-; GFX11-NEXT: s_lshr_b32 s14, s9, 16
+; GFX11-NEXT: s_lshr_b32 s14, s10, 16
; GFX11-NEXT: s_lshr_b32 s15, s8, 16
; GFX11-NEXT: s_lshr_b32 s40, s7, 16
; GFX11-NEXT: s_lshr_b32 s41, s6, 16
@@ -28298,9 +28503,9 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s6, s6, s41
; GFX11-NEXT: s_pack_ll_b32_b16 s7, s7, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s14
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s13
-; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s12
+; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s12
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -28312,8 +28517,8 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
-; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s10
+; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s10
+; GFX11-NEXT: v_dual_mov_b32 v24, s11 :: v_dual_mov_b32 v25, s9
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: ; implicit-def: $sgpr77
@@ -28342,7 +28547,9 @@ define inreg <52 x half> @bitcast_v13i64_to_v52f16_scalar(<13 x i64> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s78
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29728,15 +29935,15 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v42, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v42, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v10
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_cvt_f16_f32_e32 v52, v12
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
@@ -29754,13 +29961,13 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v27, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v29
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v53, s17
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
+; SI-NEXT: v_cvt_f16_f32_e32 v3, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: v_cvt_f16_f32_e32 v12, s21
; SI-NEXT: v_cvt_f16_f32_e32 v14, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
@@ -29778,6 +29985,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v48
@@ -29803,8 +30011,8 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB47_4
@@ -29815,17 +30023,18 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
+; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
+; SI-NEXT: v_or_b32_e32 v0, v3, v0
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT: v_mov_b32_e32 v39, v54
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; SI-NEXT: v_or_b32_e32 v0, v11, v0
; SI-NEXT: v_or_b32_e32 v2, v14, v2
; SI-NEXT: v_or_b32_e32 v3, v10, v3
; SI-NEXT: v_or_b32_e32 v4, v9, v4
@@ -29833,11 +30042,12 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v6, v7, v6
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v46
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v55
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v39
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v47
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
@@ -29849,10 +30059,10 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v51, v46
; SI-NEXT: v_or_b32_e32 v7, v45, v7
; SI-NEXT: v_or_b32_e32 v8, v40, v8
-; SI-NEXT: v_or_b32_e32 v9, v55, v9
-; SI-NEXT: v_or_b32_e32 v10, v54, v10
-; SI-NEXT: v_or_b32_e32 v11, v47, v11
-; SI-NEXT: v_or_b32_e32 v12, v60, v12
+; SI-NEXT: v_or_b32_e32 v9, v42, v9
+; SI-NEXT: v_or_b32_e32 v10, v57, v10
+; SI-NEXT: v_or_b32_e32 v11, v60, v11
+; SI-NEXT: v_or_b32_e32 v12, v53, v12
; SI-NEXT: v_or_b32_e32 v13, v52, v13
; SI-NEXT: v_or_b32_e32 v14, v63, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
@@ -29880,15 +30090,16 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v54
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v42
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v57
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -29899,8 +30110,8 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v53
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
; SI-NEXT: v_cvt_f32_f16_e32 v16, v63
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
@@ -30000,7 +30211,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
@@ -30038,22 +30249,22 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v55
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v39
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -30190,7 +30401,10 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v30, v58
; SI-NEXT: v_mov_b32_e32 v58, v63
; SI-NEXT: v_mov_b32_e32 v63, v50
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: v_mov_b32_e32 v39, v54
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v52f16_to_v13i64_scalar:
; VI: ; %bb.0:
@@ -30210,6 +30424,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -30222,7 +30437,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -30406,23 +30621,13 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v52f16_to_v13i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -30438,6 +30643,19 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -30454,7 +30672,6 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -30469,6 +30686,7 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -30580,7 +30798,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
+; GFX9-NEXT: s_branch .LBB47_3
;
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -30610,9 +30830,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -30624,15 +30844,14 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -30644,10 +30863,11 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -30665,10 +30885,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -30693,9 +30912,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -30708,7 +30927,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-TRUE16-NEXT: s_branch .LBB47_3
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -30730,9 +30951,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -30744,15 +30965,14 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -30764,10 +30984,11 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -30785,10 +31006,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -30813,9 +31033,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -30828,7 +31048,9 @@ define inreg <13 x i64> @bitcast_v52f16_to_v13i64_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-FAKE16-NEXT: s_branch .LBB47_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31624,6 +31846,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v25, s16
; SI-NEXT: v_mov_b32_e32 v26, s17
; SI-NEXT: v_mov_b32_e32 v23, s18
@@ -31636,9 +31859,9 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v18, s25
; SI-NEXT: v_mov_b32_e32 v15, s26
; SI-NEXT: v_mov_b32_e32 v16, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v13, s28
; SI-NEXT: v_mov_b32_e32 v14, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:16 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
@@ -31913,12 +32136,15 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr35
; SI-NEXT: ; implicit-def: $vgpr27
; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v13f64_to_v52i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, s16
; VI-NEXT: v_mov_b32_e32 v22, s17
; VI-NEXT: v_mov_b32_e32 v17, s18
@@ -31931,9 +32157,9 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s25
; VI-NEXT: v_mov_b32_e32 v19, s26
; VI-NEXT: v_mov_b32_e32 v20, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, s28
; VI-NEXT: v_mov_b32_e32 v16, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -32105,12 +32331,15 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; implicit-def: $vgpr38
; VI-NEXT: ; implicit-def: $vgpr25
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v13f64_to_v52i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, s16
; GFX9-NEXT: v_mov_b32_e32 v22, s17
; GFX9-NEXT: v_mov_b32_e32 v17, s18
@@ -32123,9 +32352,9 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s25
; GFX9-NEXT: v_mov_b32_e32 v19, s26
; GFX9-NEXT: v_mov_b32_e32 v20, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, s28
; GFX9-NEXT: v_mov_b32_e32 v16, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -32297,7 +32526,9 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr39
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: ; implicit-def: $vgpr25
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
+; GFX9-NEXT: s_branch .LBB49_3
;
; GFX11-LABEL: bitcast_v13f64_to_v52i16_scalar:
; GFX11: ; %bb.0:
@@ -32312,8 +32543,8 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
; GFX11-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
@@ -32342,8 +32573,7 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-NEXT: s_cbranch_execnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -32470,7 +32700,9 @@ define inreg <52 x i16> @bitcast_v13f64_to_v52i16_scalar(<13 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr36
; GFX11-NEXT: ; implicit-def: $vgpr35
; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
+; GFX11-NEXT: s_branch .LBB49_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -33682,6 +33914,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v38, v14
; SI-NEXT: v_mov_b32_e32 v39, v12
; SI-NEXT: v_mov_b32_e32 v48, v10
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v63, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v3
@@ -33704,7 +33937,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v2
; SI-NEXT: s_waitcnt vmcnt(5)
; SI-NEXT: v_lshlrev_b32_e32 v59, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v6
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -33998,7 +34231,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v32, v57
; SI-NEXT: v_mov_b32_e32 v57, v63
; SI-NEXT: buffer_load_dword v63, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v52i16_to_v13f64_scalar:
; VI: ; %bb.0:
@@ -34018,6 +34253,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -34030,7 +34266,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -34131,79 +34367,79 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; VI-NEXT: v_add_u32_e32 v16, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v48
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v48 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
-; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
-; VI-NEXT: s_and_b32 s4, s16, 0xffff
-; VI-NEXT: s_lshl_b32 s5, s43, 16
-; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: v_add_u32_e32 v17, vcc, 0x30000, v0
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v39
; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v39 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: s_and_b32 s4, s16, 0xffff
+; VI-NEXT: s_lshl_b32 s5, s43, 16
+; VI-NEXT: s_add_i32 s17, s17, 3
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s4, s5, s4
; VI-NEXT: s_and_b32 s5, s17, 0xffff
; VI-NEXT: s_lshl_b32 s16, s42, 16
; VI-NEXT: s_add_i32 s18, s18, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s5, s16, s5
; VI-NEXT: s_and_b32 s16, s18, 0xffff
; VI-NEXT: s_lshl_b32 s17, s41, 16
; VI-NEXT: s_add_i32 s19, s19, 3
-; VI-NEXT: v_add_u32_e32 v18, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v38
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v38 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s16, s17, s16
; VI-NEXT: s_and_b32 s17, s19, 0xffff
; VI-NEXT: s_lshl_b32 s18, s40, 16
; VI-NEXT: s_add_i32 s20, s20, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s17, s18, s17
; VI-NEXT: s_and_b32 s18, s20, 0xffff
; VI-NEXT: s_lshl_b32 s15, s15, 16
; VI-NEXT: s_add_i32 s21, s21, 3
-; VI-NEXT: v_add_u32_e32 v19, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v37
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v37 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s15, s15, s18
; VI-NEXT: s_and_b32 s18, s21, 0xffff
; VI-NEXT: s_lshl_b32 s14, s14, 16
; VI-NEXT: s_add_i32 s22, s22, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s14, s14, s18
; VI-NEXT: s_and_b32 s18, s22, 0xffff
; VI-NEXT: s_lshl_b32 s13, s13, 16
; VI-NEXT: s_add_i32 s23, s23, 3
-; VI-NEXT: v_add_u32_e32 v20, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v36
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v36 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s13, s13, s18
; VI-NEXT: s_and_b32 s18, s23, 0xffff
; VI-NEXT: s_lshl_b32 s12, s12, 16
; VI-NEXT: s_add_i32 s24, s24, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s12, s12, s18
; VI-NEXT: s_and_b32 s18, s24, 0xffff
; VI-NEXT: s_lshl_b32 s11, s11, 16
; VI-NEXT: s_add_i32 s25, s25, 3
-; VI-NEXT: v_add_u32_e32 v21, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v35
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v35 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s11, s11, s18
; VI-NEXT: s_and_b32 s18, s25, 0xffff
; VI-NEXT: s_lshl_b32 s10, s10, 16
; VI-NEXT: s_add_i32 s26, s26, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
+; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
+; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
+; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; VI-NEXT: s_or_b32 s10, s10, s18
; VI-NEXT: s_and_b32 s18, s26, 0xffff
; VI-NEXT: s_lshl_b32 s9, s9, 16
; VI-NEXT: s_add_i32 s27, s27, 3
-; VI-NEXT: v_add_u32_e32 v22, vcc, 0x30000, v0
-; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v34
-; VI-NEXT: v_lshlrev_b32_sdwa v2, v1, v34 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
+; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s9, s9, s18
; VI-NEXT: s_and_b32 s18, s27, 0xffff
; VI-NEXT: s_lshl_b32 s8, s8, 16
; VI-NEXT: s_add_i32 s28, s28, 3
-; VI-NEXT: v_or_b32_sdwa v0, v2, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_0
; VI-NEXT: s_or_b32 s8, s8, s18
; VI-NEXT: s_and_b32 s18, s28, 0xffff
; VI-NEXT: s_lshl_b32 s7, s7, 16
@@ -34253,23 +34489,13 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v52i16_to_v13f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -34285,6 +34511,19 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -34301,7 +34540,6 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -34316,6 +34554,7 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -34425,7 +34664,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-TRUE16-LABEL: bitcast_v52i16_to_v13f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -34455,9 +34696,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -34469,15 +34710,14 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -34489,10 +34729,11 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -34510,10 +34751,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -34538,9 +34778,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -34553,7 +34793,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-TRUE16-NEXT: s_branch .LBB51_3
;
; GFX11-FAKE16-LABEL: bitcast_v52i16_to_v13f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -34575,9 +34817,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -34589,15 +34831,14 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -34609,10 +34850,11 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -34630,10 +34872,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -34658,9 +34899,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s15, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -34673,7 +34914,9 @@ define inreg <13 x double> @bitcast_v52i16_to_v13f64_scalar(<52 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-FAKE16-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -35733,6 +35976,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v13
+; SI-NEXT: s_and_b64 s[40:41], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s14, v1
; SI-NEXT: v_readfirstlane_b32 s15, v2
; SI-NEXT: v_readfirstlane_b32 s12, v3
@@ -35744,8 +35988,8 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v9
; SI-NEXT: v_readfirstlane_b32 s7, v10
; SI-NEXT: v_readfirstlane_b32 s4, v11
-; SI-NEXT: s_and_b64 s[40:41], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v12
+; SI-NEXT: s_mov_b64 s[40:41], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -36195,12 +36439,15 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr46
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[40:41]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v13f64_to_v52f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v21, s16
; VI-NEXT: v_mov_b32_e32 v22, s17
; VI-NEXT: v_mov_b32_e32 v17, s18
@@ -36213,9 +36460,9 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v24, s25
; VI-NEXT: v_mov_b32_e32 v19, s26
; VI-NEXT: v_mov_b32_e32 v20, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v15, s28
; VI-NEXT: v_mov_b32_e32 v16, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -36387,12 +36634,15 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; VI-NEXT: ; implicit-def: $vgpr39
; VI-NEXT: ; implicit-def: $vgpr38
; VI-NEXT: ; implicit-def: $vgpr25
-; VI-NEXT: s_branch .LBB53_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB53_2
+; VI-NEXT: s_branch .LBB53_3
;
; GFX9-LABEL: bitcast_v13f64_to_v52f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v21, s16
; GFX9-NEXT: v_mov_b32_e32 v22, s17
; GFX9-NEXT: v_mov_b32_e32 v17, s18
@@ -36405,9 +36655,9 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v24, s25
; GFX9-NEXT: v_mov_b32_e32 v19, s26
; GFX9-NEXT: v_mov_b32_e32 v20, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v15, s28
; GFX9-NEXT: v_mov_b32_e32 v16, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:12 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
@@ -36579,7 +36829,9 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr39
; GFX9-NEXT: ; implicit-def: $vgpr38
; GFX9-NEXT: ; implicit-def: $vgpr25
-; GFX9-NEXT: s_branch .LBB53_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB53_2
+; GFX9-NEXT: s_branch .LBB53_3
;
; GFX11-LABEL: bitcast_v13f64_to_v52f16_scalar:
; GFX11: ; %bb.0:
@@ -36594,8 +36846,8 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v13, s24 :: v_dual_mov_b32 v14, s25
; GFX11-NEXT: v_dual_mov_b32 v11, s26 :: v_dual_mov_b32 v12, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v34, 16, v7
@@ -36624,8 +36876,7 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v24
; GFX11-NEXT: v_lshrrev_b32_e32 v33, 16, v27
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-NEXT: s_cbranch_execnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -36752,7 +37003,9 @@ define inreg <52 x half> @bitcast_v13f64_to_v52f16_scalar(<13 x double> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr36
; GFX11-NEXT: ; implicit-def: $vgpr35
; GFX11-NEXT: ; implicit-def: $vgpr34
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB53_2
+; GFX11-NEXT: s_branch .LBB53_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -38138,15 +38391,15 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:124 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v42, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v55, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v55, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v42, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v54, v7
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:120 ; 4-byte Folded Spill
-; SI-NEXT: v_cvt_f16_f32_e32 v54, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v60, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v60, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v10
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_cvt_f16_f32_e32 v52, v12
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
@@ -38164,13 +38417,13 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v27, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v29
; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v53, s17
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s16
+; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
+; SI-NEXT: v_cvt_f16_f32_e32 v3, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
; SI-NEXT: v_cvt_f16_f32_e32 v2, s18
; SI-NEXT: v_cvt_f16_f32_e32 v12, s21
; SI-NEXT: v_cvt_f16_f32_e32 v14, s20
-; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
@@ -38188,6 +38441,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: s_waitcnt vmcnt(3)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v44
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v48
@@ -38213,8 +38467,8 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v14, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB55_4
@@ -38225,17 +38479,18 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:104 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:96 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
+; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v53
+; SI-NEXT: v_or_b32_e32 v0, v3, v0
; SI-NEXT: v_or_b32_e32 v1, v2, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v12
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v11
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
+; SI-NEXT: v_mov_b32_e32 v39, v54
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v26
-; SI-NEXT: v_or_b32_e32 v0, v11, v0
; SI-NEXT: v_or_b32_e32 v2, v14, v2
; SI-NEXT: v_or_b32_e32 v3, v10, v3
; SI-NEXT: v_or_b32_e32 v4, v9, v4
@@ -38243,11 +38498,12 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v6, v7, v6
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v46
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v41
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v42
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v56
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v43
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v55
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v39
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v57
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v47
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v15
@@ -38259,10 +38515,10 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v51, v46
; SI-NEXT: v_or_b32_e32 v7, v45, v7
; SI-NEXT: v_or_b32_e32 v8, v40, v8
-; SI-NEXT: v_or_b32_e32 v9, v55, v9
-; SI-NEXT: v_or_b32_e32 v10, v54, v10
-; SI-NEXT: v_or_b32_e32 v11, v47, v11
-; SI-NEXT: v_or_b32_e32 v12, v60, v12
+; SI-NEXT: v_or_b32_e32 v9, v42, v9
+; SI-NEXT: v_or_b32_e32 v10, v57, v10
+; SI-NEXT: v_or_b32_e32 v11, v60, v11
+; SI-NEXT: v_or_b32_e32 v12, v53, v12
; SI-NEXT: v_or_b32_e32 v13, v52, v13
; SI-NEXT: v_or_b32_e32 v14, v63, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
@@ -38290,15 +38546,16 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_or_b32_e32 v25, v38, v25
; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v0, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v55
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v54
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v42
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v57
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
@@ -38309,8 +38566,8 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: s_waitcnt expcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v53
; SI-NEXT: v_cvt_f32_f16_e32 v15, v52
; SI-NEXT: v_cvt_f32_f16_e32 v16, v63
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
@@ -38410,7 +38667,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
@@ -38448,22 +38705,22 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v55
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v39
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v43
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v56
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v47
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
@@ -38600,7 +38857,10 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; SI-NEXT: v_mov_b32_e32 v30, v58
; SI-NEXT: v_mov_b32_e32 v58, v63
; SI-NEXT: v_mov_b32_e32 v63, v50
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: v_mov_b32_e32 v39, v54
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v52f16_to_v13f64_scalar:
; VI: ; %bb.0:
@@ -38620,6 +38880,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v11
; VI-NEXT: v_mov_b32_e32 v33, v10
; VI-NEXT: v_mov_b32_e32 v34, v9
@@ -38632,7 +38893,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v49, v2
; VI-NEXT: v_mov_b32_e32 v50, v1
; VI-NEXT: v_mov_b32_e32 v51, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB55_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -38816,23 +39077,13 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB55_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB55_2
+; VI-NEXT: s_branch .LBB55_3
;
; GFX9-LABEL: bitcast_v52f16_to_v13f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v11
-; GFX9-NEXT: v_mov_b32_e32 v33, v10
-; GFX9-NEXT: v_mov_b32_e32 v34, v9
-; GFX9-NEXT: v_mov_b32_e32 v35, v8
-; GFX9-NEXT: v_mov_b32_e32 v36, v7
-; GFX9-NEXT: v_mov_b32_e32 v37, v6
-; GFX9-NEXT: v_mov_b32_e32 v38, v5
-; GFX9-NEXT: v_mov_b32_e32 v39, v4
-; GFX9-NEXT: v_mov_b32_e32 v48, v3
-; GFX9-NEXT: v_mov_b32_e32 v49, v2
-; GFX9-NEXT: v_mov_b32_e32 v50, v1
-; GFX9-NEXT: v_mov_b32_e32 v51, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v12
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -38848,6 +39099,19 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v11
+; GFX9-NEXT: v_mov_b32_e32 v33, v10
+; GFX9-NEXT: v_mov_b32_e32 v34, v9
+; GFX9-NEXT: v_mov_b32_e32 v35, v8
+; GFX9-NEXT: v_mov_b32_e32 v36, v7
+; GFX9-NEXT: v_mov_b32_e32 v37, v6
+; GFX9-NEXT: v_mov_b32_e32 v38, v5
+; GFX9-NEXT: v_mov_b32_e32 v39, v4
+; GFX9-NEXT: v_mov_b32_e32 v48, v3
+; GFX9-NEXT: v_mov_b32_e32 v49, v2
+; GFX9-NEXT: v_mov_b32_e32 v50, v1
+; GFX9-NEXT: v_mov_b32_e32 v51, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -38864,7 +39128,6 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v39
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -38879,6 +39142,7 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v48
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v49
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v50
@@ -38990,7 +39254,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB55_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB55_2
+; GFX9-NEXT: s_branch .LBB55_3
;
; GFX11-TRUE16-LABEL: bitcast_v52f16_to_v13f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -39020,9 +39286,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-TRUE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -39034,15 +39300,14 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s40, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -39054,10 +39319,11 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -39075,10 +39341,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -39103,9 +39368,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -39118,7 +39383,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB55_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-TRUE16-NEXT: s_branch .LBB55_3
;
; GFX11-FAKE16-LABEL: bitcast_v52f16_to_v13f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -39140,9 +39407,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v5
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v48, 0xffff, v7
-; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s29, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -39154,15 +39421,14 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s40, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -39174,10 +39440,11 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s28, s15
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s29, s41
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
@@ -39195,10 +39462,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s17
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s15 :: v_dual_mov_b32 v17, s16
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s40
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-FAKE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v39, 16, v55
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v38, 16, v54
@@ -39223,9 +39489,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s15 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -39238,7 +39504,9 @@ define inreg <13 x double> @bitcast_v52f16_to_v13f64_scalar(<52 x half> inreg %a
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB55_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB55_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-FAKE16-NEXT: s_branch .LBB55_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -40633,6 +40901,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; SI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:328 ; 4-byte Folded Spill
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v31, s22
@@ -40746,9 +41015,6 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; SI-NEXT: .LBB57_2:
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: ; implicit-def: $vgpr31
-; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; implicit-def: $vgpr34
@@ -40824,6 +41090,8 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr31
+; SI-NEXT: ; kill: killed $vgpr31
+; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: .LBB57_3: ; %Flow
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
@@ -41330,6 +41598,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v13, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v9
@@ -41338,15 +41607,18 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; VI-NEXT: v_lshrrev_b32_e32 v12, 16, v4
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_add_i32 s43, s43, 3
; VI-NEXT: s_add_i32 s17, s17, 3
@@ -41399,7 +41671,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
; VI-NEXT: v_add_u32_e32 v11, vcc, 3, v11
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -41481,8 +41753,6 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s7
; VI-NEXT: v_mov_b32_e32 v13, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v52i16_to_v52f16_scalar:
; GFX9: ; %bb.0:
@@ -41502,6 +41772,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
@@ -41510,7 +41781,7 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v4
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
@@ -41519,10 +41790,13 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s29, s43
; GFX9-NEXT: v_pk_add_u16 v13, s4, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s28, s42
@@ -41614,8 +41888,6 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; GFX9-NEXT: s_branch .LBB57_5
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -41755,13 +42027,16 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -41849,8 +42124,6 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
-; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
@@ -41953,19 +42226,22 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -41994,10 +42270,10 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, v7, 3 op_sel_hi:[1,0]
@@ -42020,12 +42296,12 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v30, s11, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v31, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v26, s8, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v33, s0, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v32, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v28, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v27, s6, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v33
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v32
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v29
@@ -42053,8 +42329,6 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
; GFX11-FAKE16-NEXT: s_branch .LBB57_5
-; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
@@ -42071,8 +42345,8 @@ define inreg <52 x half> @bitcast_v52i16_to_v52f16_scalar(<52 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s15 :: v_dual_mov_b32 v49, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s13 :: v_dual_mov_b32 v51, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s11 :: v_dual_mov_b32 v53, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s9 :: v_dual_mov_b32 v55, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s8 :: v_dual_mov_b32 v55, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s9 :: v_dual_mov_b32 v65, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s5
; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
@@ -43293,10 +43567,14 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v58
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v44, v44
; SI-NEXT: v_cvt_f32_f16_e32 v43, v43
; SI-NEXT: v_cvt_f32_f16_e32 v42, v42
@@ -43518,7 +43796,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v51, v9, v56, 16
; SI-NEXT: v_alignbit_b32 v29, v3, v28, 16
; SI-NEXT: v_alignbit_b32 v28, v5, v57, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v27, 0xffff, v27
; SI-NEXT: v_lshlrev_b32_e32 v44, 16, v44
; SI-NEXT: v_and_b32_e32 v24, 0xffff, v24
@@ -43681,8 +43959,6 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:76 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v52f16_to_v52i16_scalar:
; VI: ; %bb.0:
@@ -43702,6 +43978,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v9
@@ -43710,7 +43987,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; VI-NEXT: v_lshrrev_b32_e32 v18, 16, v4
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
@@ -43719,10 +43996,13 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v38, 0x200
; VI-NEXT: v_add_f16_e32 v36, s16, v38
; VI-NEXT: v_add_f16_e32 v43, s43, v38
@@ -43777,8 +44057,6 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v11, 0x200, v11
; VI-NEXT: v_add_f16_e32 v25, 0x200, v25
; VI-NEXT: s_branch .LBB59_5
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v38, s6
; VI-NEXT: v_mov_b32_e32 v13, s29
@@ -43898,6 +44176,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
@@ -43906,7 +44185,7 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
; GFX9-NEXT: v_lshrrev_b32_e32 v18, 16, v4
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v17, 16, v3
; GFX9-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; GFX9-NEXT: v_lshrrev_b32_e32 v15, 16, v1
@@ -43915,10 +44194,13 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v43, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX9-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX9-NEXT: v_and_b32_e32 v9, 0xffff, v9
@@ -44012,8 +44294,6 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; GFX9-NEXT: s_branch .LBB59_5
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s29
; GFX9-NEXT: v_mov_b32_e32 v12, s28
@@ -44153,13 +44433,16 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX11-TRUE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-TRUE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -44247,8 +44530,6 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
-; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
@@ -44351,19 +44632,22 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
; GFX11-FAKE16-NEXT: v_and_b32_e32 v6, 0xffff, v6
; GFX11-FAKE16-NEXT: v_and_b32_e32 v5, 0xffff, v5
@@ -44392,10 +44676,10 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, v7 op_sel_hi:[0,1]
@@ -44418,12 +44702,12 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v30, 0x200, s11 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v31, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v26, 0x200, s8 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v33, 0x200, s0 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v32, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v28, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v27, 0x200, s6 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v67, 16, v33
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v66, 16, v32
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v65, 16, v29
@@ -44451,8 +44735,6 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v24, 16, v6
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v25, 16, v7
; GFX11-FAKE16-NEXT: s_branch .LBB59_5
-; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s29 :: v_dual_mov_b32 v17, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v11, s27 :: v_dual_mov_b32 v12, s26
@@ -44469,8 +44751,8 @@ define inreg <52 x i16> @bitcast_v52f16_to_v52i16_scalar(<52 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v48, s15 :: v_dual_mov_b32 v49, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v50, s13 :: v_dual_mov_b32 v51, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s11 :: v_dual_mov_b32 v53, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s9 :: v_dual_mov_b32 v55, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s6 :: v_dual_mov_b32 v65, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s8 :: v_dual_mov_b32 v55, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s9 :: v_dual_mov_b32 v65, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s4 :: v_dual_mov_b32 v67, s5
; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v33, 0xffff, v33
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
index a43ce77..d760c6d 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.896bit.ll
@@ -193,6 +193,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -213,7 +214,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -222,10 +223,13 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25
@@ -254,16 +258,15 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v28i32_to_v28f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -284,7 +287,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -293,10 +296,13 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v27, vcc, 3, v27
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
; VI-NEXT: v_add_u32_e32 v25, vcc, 3, v25
@@ -325,16 +331,15 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v28i32_to_v28f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -355,7 +360,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -364,10 +369,13 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v27, 3, v27
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
@@ -396,42 +404,40 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v28i32_to_v28f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
; GFX11-NEXT: v_add_nc_u32_e32 v26, 3, v26
; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
@@ -460,6 +466,7 @@ define inreg <28 x float> @bitcast_v28i32_to_v28f32_scalar(<28 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -651,6 +658,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -671,7 +679,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -680,10 +688,13 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v27, 1.0, v27
; SI-NEXT: v_add_f32_e32 v26, 1.0, v26
; SI-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -712,16 +723,15 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v28f32_to_v28i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -742,7 +752,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -751,10 +761,13 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
; VI-NEXT: v_add_f32_e32 v26, 1.0, v26
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -783,16 +796,15 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v28f32_to_v28i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -813,7 +825,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -822,10 +834,13 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
; GFX9-NEXT: v_add_f32_e32 v26, 1.0, v26
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -854,42 +869,40 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v28f32_to_v28i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB3_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: .LBB3_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
@@ -904,6 +917,7 @@ define inreg <28 x i32> @bitcast_v28f32_to_v28i32_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1109,6 +1123,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -1129,7 +1144,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -1138,10 +1153,13 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25
@@ -1170,16 +1188,15 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v28i32_to_v14i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -1200,7 +1217,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -1209,10 +1226,13 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v27, vcc, 3, v27
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
; VI-NEXT: v_add_u32_e32 v25, vcc, 3, v25
@@ -1241,16 +1261,15 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v28i32_to_v14i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -1271,7 +1290,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -1280,10 +1299,13 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v27, 3, v27
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
@@ -1312,42 +1334,40 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v28i32_to_v14i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
; GFX11-NEXT: v_add_nc_u32_e32 v26, 3, v26
; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
@@ -1376,6 +1396,7 @@ define inreg <14 x i64> @bitcast_v28i32_to_v14i64_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1588,6 +1609,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -1608,7 +1630,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -1617,10 +1639,13 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
; SI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
@@ -1649,16 +1674,15 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v14i64_to_v28i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -1679,7 +1703,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -1688,10 +1712,13 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
; VI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
@@ -1720,16 +1747,15 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v14i64_to_v28i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -1750,7 +1776,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -1759,10 +1785,13 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, 3, v26
; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v27, vcc
; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 3, v24
@@ -1791,42 +1820,40 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v14i64_to_v28i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB7_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: .LBB7_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
@@ -1862,6 +1889,7 @@ define inreg <28 x i32> @bitcast_v14i64_to_v28i32_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2067,6 +2095,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -2087,7 +2116,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -2096,10 +2125,13 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
; SI-NEXT: v_add_i32_e32 v25, vcc, 3, v25
@@ -2128,16 +2160,15 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v28i32_to_v14f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -2158,7 +2189,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -2167,10 +2198,13 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v27, vcc, 3, v27
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
; VI-NEXT: v_add_u32_e32 v25, vcc, 3, v25
@@ -2199,16 +2233,15 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v28i32_to_v14f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -2229,7 +2262,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -2238,10 +2271,13 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v27, 3, v27
; GFX9-NEXT: v_add_u32_e32 v26, 3, v26
; GFX9-NEXT: v_add_u32_e32 v25, 3, v25
@@ -2270,42 +2306,40 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v28i32_to_v14f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
; GFX11-NEXT: v_add_nc_u32_e32 v26, 3, v26
; GFX11-NEXT: v_add_nc_u32_e32 v25, 3, v25
@@ -2334,6 +2368,7 @@ define inreg <14 x double> @bitcast_v28i32_to_v14f64_scalar(<28 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2483,6 +2518,7 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -2509,13 +2545,16 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -2530,17 +2569,16 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: v_mov_b32_e32 v15, v29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v14f64_to_v28i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -2567,13 +2605,16 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -2588,17 +2629,16 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: v_mov_b32_e32 v15, v29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v14f64_to_v28i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -2625,13 +2665,16 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -2646,43 +2689,41 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: v_mov_b32_e32 v15, v29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v14f64_to_v28i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB11_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: .LBB11_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -2697,6 +2738,7 @@ define inreg <28 x i32> @bitcast_v14f64_to_v28i32_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3638,6 +3680,7 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s43, v1
; SI-NEXT: v_readfirstlane_b32 s42, v2
; SI-NEXT: v_readfirstlane_b32 s41, v3
@@ -3651,8 +3694,8 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v11
; SI-NEXT: v_readfirstlane_b32 s8, v12
; SI-NEXT: v_readfirstlane_b32 s7, v13
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -3968,7 +4011,9 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr45
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v28i32_to_v56i16_scalar:
; VI: ; %bb.0:
@@ -3977,8 +4022,9 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: v_writelane_b32 v28, s30, 0
-; VI-NEXT: v_writelane_b32 v28, s31, 1
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: v_writelane_b32 v28, s31, 1
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v28, s34, 2
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_readfirstlane_b32 s42, v1
@@ -3992,14 +4038,14 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v9
; VI-NEXT: v_readfirstlane_b32 s9, v10
; VI-NEXT: v_readfirstlane_b32 s8, v11
-; VI-NEXT: v_readfirstlane_b32 s6, v12
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v13
+; VI-NEXT: v_readfirstlane_b32 s7, v12
+; VI-NEXT: v_readfirstlane_b32 s6, v13
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v28, s35, 3
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -4028,8 +4074,8 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: s_lshr_b32 s35, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -4056,8 +4102,8 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -4163,12 +4209,12 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s42, s46, 16
; VI-NEXT: s_or_b32 s8, s8, s42
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s42, s45, 16
-; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_lshl_b32 s42, s45, 16
; VI-NEXT: s_or_b32 s7, s7, s42
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -4195,8 +4241,8 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v23, s10
; VI-NEXT: v_mov_b32_e32 v24, s9
; VI-NEXT: v_mov_b32_e32 v25, s8
-; VI-NEXT: v_mov_b32_e32 v26, s6
-; VI-NEXT: v_mov_b32_e32 v27, s7
+; VI-NEXT: v_mov_b32_e32 v26, s7
+; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: v_readlane_b32 s35, v28, 3
; VI-NEXT: v_readlane_b32 s34, v28, 2
; VI-NEXT: v_readlane_b32 s31, v28, 1
@@ -4235,43 +4281,46 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; implicit-def: $sgpr45
; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v28i32_to_v56i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s6, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -4288,6 +4337,7 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s43, s43, 3
; GFX9-NEXT: s_add_i32 s42, s42, 3
; GFX9-NEXT: s_add_i32 s41, s41, 3
@@ -4301,7 +4351,6 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -4316,20 +4365,20 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -4359,20 +4408,20 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s44
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -4387,20 +4436,20 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB13_4:
; GFX9-NEXT: ; implicit-def: $sgpr95
@@ -4431,7 +4480,9 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr45
; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v28i32_to_v56i16_scalar:
; GFX11: ; %bb.0:
@@ -4444,16 +4495,16 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
; GFX11-NEXT: v_readfirstlane_b32 s9, v5
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
-; GFX11-NEXT: v_readfirstlane_b32 s11, v7
+; GFX11-NEXT: v_readfirstlane_b32 s12, v7
; GFX11-NEXT: v_readfirstlane_b32 s13, v8
-; GFX11-NEXT: v_readfirstlane_b32 s12, v9
-; GFX11-NEXT: s_mov_b32 s90, 0
+; GFX11-NEXT: v_readfirstlane_b32 s11, v9
+; GFX11-NEXT: s_mov_b32 s90, -1
; GFX11-NEXT: s_and_b32 s14, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -4479,12 +4530,11 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s79, s2, 16
; GFX11-NEXT: s_lshr_b32 s88, s1, 16
; GFX11-NEXT: s_lshr_b32 s89, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s12, s12, 3
-; GFX11-NEXT: s_add_i32 s13, s13, 3
; GFX11-NEXT: s_add_i32 s11, s11, 3
+; GFX11-NEXT: s_add_i32 s13, s13, 3
+; GFX11-NEXT: s_add_i32 s12, s12, 3
; GFX11-NEXT: s_add_i32 s10, s10, 3
; GFX11-NEXT: s_add_i32 s9, s9, 3
; GFX11-NEXT: s_add_i32 s8, s8, 3
@@ -4510,9 +4560,9 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -4565,9 +4615,9 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s43
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s14
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4580,8 +4630,8 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s12
+; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s11
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: ; implicit-def: $sgpr89
@@ -4612,7 +4662,9 @@ define inreg <56 x i16> @bitcast_v28i32_to_v56i16_scalar(<28 x i32> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -5926,6 +5978,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v36, v18
; SI-NEXT: v_mov_b32_e32 v37, v16
; SI-NEXT: v_mov_b32_e32 v38, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5
@@ -5952,7 +6005,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6
; SI-NEXT: s_waitcnt vmcnt(11)
@@ -6266,7 +6319,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v45, v56
; SI-NEXT: v_mov_b32_e32 v56, v59
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v56i16_to_v28i32_scalar:
; VI: ; %bb.0:
@@ -6286,6 +6341,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -6300,7 +6356,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB15_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -6535,25 +6591,13 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v56i16_to_v28i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -6569,6 +6613,21 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -6587,7 +6646,6 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -6602,6 +6660,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -6729,7 +6788,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -6767,7 +6828,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -6779,15 +6840,14 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -6799,10 +6859,11 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -6822,10 +6883,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -6852,9 +6912,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -6869,7 +6929,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-TRUE16-NEXT: s_branch .LBB15_3
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -6897,7 +6959,7 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -6909,15 +6971,14 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -6929,10 +6990,11 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -6952,10 +7014,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -6982,9 +7043,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -6999,7 +7060,9 @@ define inreg <28 x i32> @bitcast_v56i16_to_v28i32_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-FAKE16-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -8257,6 +8320,7 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s43, v1
; SI-NEXT: v_readfirstlane_b32 s42, v2
; SI-NEXT: v_readfirstlane_b32 s41, v3
@@ -8267,11 +8331,11 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s12, v8
; SI-NEXT: v_readfirstlane_b32 s11, v9
; SI-NEXT: v_readfirstlane_b32 s10, v10
-; SI-NEXT: v_readfirstlane_b32 s8, v11
-; SI-NEXT: v_readfirstlane_b32 s7, v12
-; SI-NEXT: v_readfirstlane_b32 s6, v13
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v14
+; SI-NEXT: v_readfirstlane_b32 s9, v11
+; SI-NEXT: v_readfirstlane_b32 s8, v12
+; SI-NEXT: v_readfirstlane_b32 s7, v13
+; SI-NEXT: v_readfirstlane_b32 s6, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -8283,13 +8347,13 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -8343,10 +8407,10 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v56, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v14, s11
; SI-NEXT: v_cvt_f32_f16_e32 v16, s12
@@ -8397,10 +8461,10 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s12, s12, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s10, s10, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s44, s18, 16
@@ -8425,14 +8489,14 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s79, s12, 16
; SI-NEXT: s_lshr_b32 s88, s11, 16
; SI-NEXT: s_lshr_b32 s89, s10, 16
-; SI-NEXT: s_lshr_b32 s90, s8, 16
-; SI-NEXT: s_lshr_b32 s91, s7, 16
-; SI-NEXT: s_lshr_b32 s92, s6, 16
-; SI-NEXT: s_lshr_b32 s93, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: s_lshr_b32 s90, s9, 16
+; SI-NEXT: s_lshr_b32 s91, s8, 16
+; SI-NEXT: s_lshr_b32 s92, s7, 16
+; SI-NEXT: s_lshr_b32 s93, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v14, s11
; SI-NEXT: v_cvt_f32_f16_e32 v16, s12
@@ -8753,7 +8817,9 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v28i32_to_v56f16_scalar:
; VI: ; %bb.0:
@@ -8762,8 +8828,9 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: v_writelane_b32 v28, s30, 0
-; VI-NEXT: v_writelane_b32 v28, s31, 1
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: v_writelane_b32 v28, s31, 1
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v28, s34, 2
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_readfirstlane_b32 s42, v1
@@ -8777,14 +8844,14 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v9
; VI-NEXT: v_readfirstlane_b32 s9, v10
; VI-NEXT: v_readfirstlane_b32 s8, v11
-; VI-NEXT: v_readfirstlane_b32 s6, v12
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v13
+; VI-NEXT: v_readfirstlane_b32 s7, v12
+; VI-NEXT: v_readfirstlane_b32 s6, v13
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v28, s35, 3
; VI-NEXT: s_cbranch_scc0 .LBB17_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -8813,8 +8880,8 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: s_lshr_b32 s35, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -8841,8 +8908,8 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -8948,12 +9015,12 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s42, s46, 16
; VI-NEXT: s_or_b32 s8, s8, s42
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s42, s45, 16
-; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_lshl_b32 s42, s45, 16
; VI-NEXT: s_or_b32 s7, s7, s42
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -8980,8 +9047,8 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v23, s10
; VI-NEXT: v_mov_b32_e32 v24, s9
; VI-NEXT: v_mov_b32_e32 v25, s8
-; VI-NEXT: v_mov_b32_e32 v26, s6
-; VI-NEXT: v_mov_b32_e32 v27, s7
+; VI-NEXT: v_mov_b32_e32 v26, s7
+; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: v_readlane_b32 s35, v28, 3
; VI-NEXT: v_readlane_b32 s34, v28, 2
; VI-NEXT: v_readlane_b32 s31, v28, 1
@@ -9020,43 +9087,46 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; implicit-def: $sgpr45
; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB17_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB17_2
+; VI-NEXT: s_branch .LBB17_3
;
; GFX9-LABEL: bitcast_v28i32_to_v56f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s6, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -9073,6 +9143,7 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s43, s43, 3
; GFX9-NEXT: s_add_i32 s42, s42, 3
; GFX9-NEXT: s_add_i32 s41, s41, 3
@@ -9086,7 +9157,6 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -9101,20 +9171,20 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -9144,20 +9214,20 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s44
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -9172,20 +9242,20 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB17_4:
; GFX9-NEXT: ; implicit-def: $sgpr95
@@ -9216,7 +9286,9 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr45
; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB17_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB17_2
+; GFX9-NEXT: s_branch .LBB17_3
;
; GFX11-LABEL: bitcast_v28i32_to_v56f16_scalar:
; GFX11: ; %bb.0:
@@ -9229,16 +9301,16 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
; GFX11-NEXT: v_readfirstlane_b32 s9, v5
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
-; GFX11-NEXT: v_readfirstlane_b32 s11, v7
+; GFX11-NEXT: v_readfirstlane_b32 s12, v7
; GFX11-NEXT: v_readfirstlane_b32 s13, v8
-; GFX11-NEXT: v_readfirstlane_b32 s12, v9
-; GFX11-NEXT: s_mov_b32 s90, 0
+; GFX11-NEXT: v_readfirstlane_b32 s11, v9
+; GFX11-NEXT: s_mov_b32 s90, -1
; GFX11-NEXT: s_and_b32 s14, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -9264,12 +9336,11 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s79, s2, 16
; GFX11-NEXT: s_lshr_b32 s88, s1, 16
; GFX11-NEXT: s_lshr_b32 s89, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
+; GFX11-NEXT: s_cbranch_execnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s12, s12, 3
-; GFX11-NEXT: s_add_i32 s13, s13, 3
; GFX11-NEXT: s_add_i32 s11, s11, 3
+; GFX11-NEXT: s_add_i32 s13, s13, 3
+; GFX11-NEXT: s_add_i32 s12, s12, 3
; GFX11-NEXT: s_add_i32 s10, s10, 3
; GFX11-NEXT: s_add_i32 s9, s9, 3
; GFX11-NEXT: s_add_i32 s8, s8, 3
@@ -9295,9 +9366,9 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -9350,9 +9421,9 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s43
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s14
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -9365,8 +9436,8 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s12
+; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s11
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: ; implicit-def: $sgpr89
@@ -9397,7 +9468,9 @@ define inreg <56 x half> @bitcast_v28i32_to_v56f16_scalar(<28 x i32> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB17_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
+; GFX11-NEXT: s_cbranch_vccz .LBB17_2
+; GFX11-NEXT: s_branch .LBB17_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10887,56 +10960,53 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:44
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v44, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v46, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v2
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v3
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v46, v6
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v38, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v8
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v16
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v22
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v27
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v30
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
@@ -10945,32 +11015,26 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v51
+; SI-NEXT: v_cvt_f16_f32_e32 v29, v52
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v51, s16
; SI-NEXT: v_cvt_f16_f32_e32 v53, s18
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: v_cvt_f16_f32_e32 v54, s19
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v55, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
@@ -10985,120 +11049,119 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v9, v4
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
+; SI-NEXT: v_or_b32_e32 v9, v59, v9
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v29
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_or_b32_e32 v5, v8, v5
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v16, v18, v16
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v49, v2
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_mov_b32_e32 v61, v44
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
-; SI-NEXT: v_mov_b32_e32 v39, v11
; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_mov_b32_e32 v33, v10
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v61
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v62
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v57
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v32
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_or_b32_e32 v8, v61, v8
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v34
+; SI-NEXT: v_mov_b32_e32 v61, v45
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v0, v55, v0
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v31
+; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_or_b32_e32 v1, v53, v1
-; SI-NEXT: v_or_b32_e32 v7, v47, v7
-; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_or_b32_e32 v8, v58, v8
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_or_b32_e32 v9, v59, v9
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_or_b32_e32 v11, v45, v11
-; SI-NEXT: v_or_b32_e32 v12, v38, v12
-; SI-NEXT: v_or_b32_e32 v13, v36, v13
-; SI-NEXT: v_or_b32_e32 v14, v35, v14
-; SI-NEXT: v_or_b32_e32 v15, v32, v15
-; SI-NEXT: v_or_b32_e32 v17, v37, v17
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_or_b32_e32 v11, v57, v11
+; SI-NEXT: v_or_b32_e32 v13, v47, v13
+; SI-NEXT: v_or_b32_e32 v15, v62, v15
+; SI-NEXT: v_or_b32_e32 v17, v63, v17
+; SI-NEXT: v_or_b32_e32 v19, v39, v19
+; SI-NEXT: v_or_b32_e32 v20, v37, v20
+; SI-NEXT: v_or_b32_e32 v21, v35, v21
+; SI-NEXT: v_or_b32_e32 v22, v33, v22
+; SI-NEXT: v_or_b32_e32 v23, v30, v23
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_or_b32_e32 v18, v19, v18
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v20, v19
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_or_b32_e32 v20, v21, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
-; SI-NEXT: v_or_b32_e32 v21, v22, v21
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v23, v22
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: v_or_b32_e32 v23, v24, v23
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v59
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
+; SI-NEXT: v_or_b32_e32 v10, v46, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v58
+; SI-NEXT: v_or_b32_e32 v14, v61, v14
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_or_b32_e32 v12, v60, v12
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v7, v44, v7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v49, v18
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v50, v27
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: v_mov_b32_e32 v62, v44
; SI-NEXT: s_cbranch_execnz .LBB19_3
; SI-NEXT: .LBB19_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v51
; SI-NEXT: v_cvt_f32_f16_e32 v3, v53
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -11112,173 +11175,186 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v38
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v36
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v32
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v50
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v29
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v49
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v39
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v35
+; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v33
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v29
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v29, v29
-; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v3
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v5, v4
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v32
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v42
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v59
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v55
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v28
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v58
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
@@ -11292,59 +11368,38 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v18, v16
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v63
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_or_b32_e32 v17, v18, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
-; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v48
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v38
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v37
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v34
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v31
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v30
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_or_b32_e32 v23, v24, v23
@@ -11382,7 +11437,6 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -11398,20 +11452,48 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
-; SI-NEXT: v_mov_b32_e32 v39, v11
-; SI-NEXT: v_mov_b32_e32 v33, v10
-; SI-NEXT: v_mov_b32_e32 v49, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
-; SI-NEXT: v_mov_b32_e32 v52, v37
-; SI-NEXT: v_mov_b32_e32 v37, v29
+; SI-NEXT: v_mov_b32_e32 v52, v63
+; SI-NEXT: v_mov_b32_e32 v63, v29
+; SI-NEXT: v_mov_b32_e32 v50, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v39
+; SI-NEXT: v_mov_b32_e32 v39, v38
+; SI-NEXT: v_mov_b32_e32 v38, v37
+; SI-NEXT: v_mov_b32_e32 v37, v36
+; SI-NEXT: v_mov_b32_e32 v36, v35
+; SI-NEXT: v_mov_b32_e32 v35, v34
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_mov_b32_e32 v33, v31
+; SI-NEXT: v_mov_b32_e32 v32, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_mov_b32_e32 v29, v37
-; SI-NEXT: v_mov_b32_e32 v37, v52
-; SI-NEXT: v_mov_b32_e32 v61, v44
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: v_mov_b32_e32 v31, v33
+; SI-NEXT: v_mov_b32_e32 v33, v34
+; SI-NEXT: v_mov_b32_e32 v34, v35
+; SI-NEXT: v_mov_b32_e32 v35, v36
+; SI-NEXT: v_mov_b32_e32 v36, v37
+; SI-NEXT: v_mov_b32_e32 v37, v38
+; SI-NEXT: v_mov_b32_e32 v38, v39
+; SI-NEXT: v_mov_b32_e32 v39, v48
+; SI-NEXT: v_mov_b32_e32 v48, v49
+; SI-NEXT: v_mov_b32_e32 v49, v50
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_mov_b32_e32 v30, v32
+; SI-NEXT: v_mov_b32_e32 v29, v63
+; SI-NEXT: v_mov_b32_e32 v63, v52
+; SI-NEXT: v_mov_b32_e32 v61, v45
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v56f16_to_v28i32_scalar:
; VI: ; %bb.0:
@@ -11431,6 +11513,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -11445,7 +11528,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB19_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -11639,25 +11722,13 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB19_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB19_2
+; VI-NEXT: s_branch .LBB19_3
;
; GFX9-LABEL: bitcast_v56f16_to_v28i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -11673,6 +11744,21 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -11691,7 +11777,6 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -11706,6 +11791,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -11835,7 +11921,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB19_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB19_2
+; GFX9-NEXT: s_branch .LBB19_3
;
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -11873,7 +11961,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -11885,15 +11973,14 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -11905,10 +11992,11 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -11928,10 +12016,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -11958,9 +12045,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -11975,7 +12062,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-TRUE16-NEXT: s_branch .LBB19_3
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -12003,7 +12092,7 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -12015,15 +12104,14 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -12035,10 +12123,11 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -12058,10 +12147,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -12088,9 +12176,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -12105,7 +12193,9 @@ define inreg <28 x i32> @bitcast_v56f16_to_v28i32_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB19_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB19_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-FAKE16-NEXT: s_branch .LBB19_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12296,6 +12386,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -12316,7 +12407,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -12325,10 +12416,13 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v27, 1.0, v27
; SI-NEXT: v_add_f32_e32 v26, 1.0, v26
; SI-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -12357,16 +12451,15 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v28f32_to_v14i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -12387,7 +12480,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -12396,10 +12489,13 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
; VI-NEXT: v_add_f32_e32 v26, 1.0, v26
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -12428,16 +12524,15 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v28f32_to_v14i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -12458,7 +12553,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -12467,10 +12562,13 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
; GFX9-NEXT: v_add_f32_e32 v26, 1.0, v26
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -12499,42 +12597,40 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v28f32_to_v14i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB21_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: .LBB21_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
@@ -12549,6 +12645,7 @@ define inreg <14 x i64> @bitcast_v28f32_to_v14i64_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -12761,6 +12858,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -12781,7 +12879,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -12790,10 +12888,13 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
; SI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
; SI-NEXT: v_add_i32_e32 v24, vcc, 3, v24
@@ -12822,16 +12923,15 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v14i64_to_v28f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -12852,7 +12952,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -12861,10 +12961,13 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
; VI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
; VI-NEXT: v_add_u32_e32 v24, vcc, 3, v24
@@ -12893,16 +12996,15 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v14i64_to_v28f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -12923,7 +13025,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -12932,10 +13034,13 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, 3, v26
; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v27, vcc
; GFX9-NEXT: v_add_co_u32_e32 v24, vcc, 3, v24
@@ -12964,42 +13069,40 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v14i64_to_v28f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB23_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: .LBB23_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
@@ -13035,6 +13138,7 @@ define inreg <28 x float> @bitcast_v14i64_to_v28f32_scalar(<14 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v3, null, 0, v3, vcc_lo
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13226,6 +13330,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -13246,7 +13351,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -13255,10 +13360,13 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_3
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v27, 1.0, v27
; SI-NEXT: v_add_f32_e32 v26, 1.0, v26
; SI-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -13287,16 +13395,15 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB25_3: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v28f32_to_v14f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -13317,7 +13424,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -13326,10 +13433,13 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
; VI-NEXT: v_add_f32_e32 v26, 1.0, v26
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -13358,16 +13468,15 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v28f32_to_v14f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -13388,7 +13497,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -13397,10 +13506,13 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
; GFX9-NEXT: v_add_f32_e32 v26, 1.0, v26
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
@@ -13429,42 +13541,40 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v28f32_to_v14f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB25_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: .LBB25_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
; GFX11-NEXT: v_dual_add_f32 v23, 1.0, v23 :: v_dual_add_f32 v22, 1.0, v22
@@ -13479,6 +13589,7 @@ define inreg <14 x double> @bitcast_v28f32_to_v14f64_scalar(<28 x float> inreg %
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13628,6 +13739,7 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -13654,13 +13766,16 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -13675,17 +13790,16 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: v_mov_b32_e32 v15, v29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v14f64_to_v28f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -13712,13 +13826,16 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -13733,17 +13850,16 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: v_mov_b32_e32 v15, v29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v14f64_to_v28f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -13770,13 +13886,16 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -13791,43 +13910,41 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: v_mov_b32_e32 v15, v29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v14f64_to_v28f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB27_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: .LBB27_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
@@ -13842,6 +13959,7 @@ define inreg <28 x float> @bitcast_v14f64_to_v28f32_scalar(<14 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14755,12 +14873,13 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v30, s16
; SI-NEXT: v_mov_b32_e32 v29, s17
; SI-NEXT: v_mov_b32_e32 v25, s18
; SI-NEXT: v_mov_b32_e32 v23, s19
; SI-NEXT: v_mov_b32_e32 v28, s20
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v26, s21
; SI-NEXT: v_mov_b32_e32 v24, s22
; SI-NEXT: v_mov_b32_e32 v22, s23
@@ -15087,26 +15206,29 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $vgpr21
; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v28f32_to_v56i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; VI-NEXT: v_mov_b32_e32 v20, s16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v19, s16
; VI-NEXT: v_mov_b32_e32 v18, s17
; VI-NEXT: v_mov_b32_e32 v17, s18
; VI-NEXT: v_mov_b32_e32 v16, s19
; VI-NEXT: v_mov_b32_e32 v15, s20
-; VI-NEXT: v_mov_b32_e32 v26, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v25, s21
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v27, s22
-; VI-NEXT: v_mov_b32_e32 v25, s23
+; VI-NEXT: v_mov_b32_e32 v26, s23
; VI-NEXT: v_mov_b32_e32 v24, s24
; VI-NEXT: v_mov_b32_e32 v23, s25
; VI-NEXT: v_mov_b32_e32 v22, s26
; VI-NEXT: v_mov_b32_e32 v21, s27
-; VI-NEXT: v_mov_b32_e32 v19, s28
+; VI-NEXT: v_mov_b32_e32 v20, s28
; VI-NEXT: v_mov_b32_e32 v14, s29
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -15133,19 +15255,19 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; VI-NEXT: s_cbranch_execnz .LBB29_3
; VI-NEXT: .LBB29_2: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -15163,19 +15285,19 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
-; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
+; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
-; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
-; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
; VI-NEXT: v_add_f32_e32 v26, 1.0, v26
+; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
+; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
-; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
+; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v11
@@ -15191,24 +15313,24 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; VI-NEXT: .LBB29_3: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
-; VI-NEXT: v_or_b32_sdwa v28, v20, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v29
-; VI-NEXT: v_or_b32_sdwa v29, v18, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v28, v19, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v29
+; VI-NEXT: v_or_b32_sdwa v29, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v30
; VI-NEXT: v_or_b32_sdwa v30, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v31
@@ -15216,11 +15338,11 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v32
; VI-NEXT: v_or_b32_sdwa v32, v15, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v33
-; VI-NEXT: v_or_b32_sdwa v33, v26, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v33, v25, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v34
; VI-NEXT: v_or_b32_sdwa v34, v27, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v35
-; VI-NEXT: v_or_b32_sdwa v35, v25, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v35, v26, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v36
; VI-NEXT: v_or_b32_sdwa v36, v24, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v37
@@ -15230,7 +15352,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v39
; VI-NEXT: v_or_b32_sdwa v39, v21, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v48
-; VI-NEXT: v_or_b32_sdwa v48, v19, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v48, v20, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v49
; VI-NEXT: v_or_b32_sdwa v49, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v47
@@ -15314,26 +15436,29 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr52
; VI-NEXT: ; implicit-def: $vgpr51
; VI-NEXT: ; implicit-def: $vgpr50
-; VI-NEXT: s_branch .LBB29_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB29_2
+; VI-NEXT: s_branch .LBB29_3
;
; GFX9-LABEL: bitcast_v28f32_to_v56i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; GFX9-NEXT: v_mov_b32_e32 v20, s16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v19, s16
; GFX9-NEXT: v_mov_b32_e32 v18, s17
; GFX9-NEXT: v_mov_b32_e32 v17, s18
; GFX9-NEXT: v_mov_b32_e32 v16, s19
; GFX9-NEXT: v_mov_b32_e32 v15, s20
-; GFX9-NEXT: v_mov_b32_e32 v26, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v25, s21
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v27, s22
-; GFX9-NEXT: v_mov_b32_e32 v25, s23
+; GFX9-NEXT: v_mov_b32_e32 v26, s23
; GFX9-NEXT: v_mov_b32_e32 v24, s24
; GFX9-NEXT: v_mov_b32_e32 v23, s25
; GFX9-NEXT: v_mov_b32_e32 v22, s26
; GFX9-NEXT: v_mov_b32_e32 v21, s27
-; GFX9-NEXT: v_mov_b32_e32 v19, s28
+; GFX9-NEXT: v_mov_b32_e32 v20, s28
; GFX9-NEXT: v_mov_b32_e32 v14, s29
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -15360,19 +15485,19 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; GFX9-NEXT: s_cbranch_execnz .LBB29_3
; GFX9-NEXT: .LBB29_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -15390,19 +15515,19 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
-; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
+; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
-; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
-; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
; GFX9-NEXT: v_add_f32_e32 v26, 1.0, v26
+; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
+; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
-; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
+; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v11
@@ -15418,27 +15543,27 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; GFX9-NEXT: .LBB29_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v26
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v25
; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v27
; GFX9-NEXT: v_lshl_or_b32 v34, v34, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v25
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v26
; GFX9-NEXT: v_lshl_or_b32 v35, v35, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v24
; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v15
@@ -15450,7 +15575,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v39, v39, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v19
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v20
; GFX9-NEXT: v_lshl_or_b32 v49, v49, 16, v14
; GFX9-NEXT: v_lshl_or_b32 v14, v47, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
@@ -15466,13 +15591,13 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v30, v30, 16, v17
; GFX9-NEXT: v_lshl_or_b32 v17, v44, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v4
+; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v18, v43, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v19, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v20, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
; GFX9-NEXT: v_lshl_or_b32 v21, v40, 16, v0
@@ -15541,7 +15666,9 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr52
; GFX9-NEXT: ; implicit-def: $vgpr51
; GFX9-NEXT: ; implicit-def: $vgpr50
-; GFX9-NEXT: s_branch .LBB29_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB29_2
+; GFX9-NEXT: s_branch .LBB29_3
;
; GFX11-LABEL: bitcast_v28f32_to_v56i16_scalar:
; GFX11: ; %bb.0:
@@ -15553,11 +15680,11 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
; GFX11-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
-; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v13, s26
+; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -15570,11 +15697,11 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
@@ -15588,17 +15715,16 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-NEXT: s_cbranch_execnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
@@ -15615,11 +15741,11 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
@@ -15642,7 +15768,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v27
; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
@@ -15656,9 +15782,9 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v11
; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v19
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -15669,9 +15795,9 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v18
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
@@ -15687,7 +15813,7 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v22
; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
@@ -15727,7 +15853,9 @@ define inreg <56 x i16> @bitcast_v28f32_to_v56i16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr48
; GFX11-NEXT: ; implicit-def: $vgpr39
; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB29_2
+; GFX11-NEXT: s_branch .LBB29_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -17041,6 +17169,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v36, v18
; SI-NEXT: v_mov_b32_e32 v37, v16
; SI-NEXT: v_mov_b32_e32 v38, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5
@@ -17067,7 +17196,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6
; SI-NEXT: s_waitcnt vmcnt(11)
@@ -17381,7 +17510,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v45, v56
; SI-NEXT: v_mov_b32_e32 v56, v59
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v56i16_to_v28f32_scalar:
; VI: ; %bb.0:
@@ -17401,6 +17532,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -17415,7 +17547,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB31_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -17650,25 +17782,13 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB31_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB31_2
+; VI-NEXT: s_branch .LBB31_3
;
; GFX9-LABEL: bitcast_v56i16_to_v28f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -17684,6 +17804,21 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -17702,7 +17837,6 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -17717,6 +17851,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -17844,7 +17979,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB31_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB31_2
+; GFX9-NEXT: s_branch .LBB31_3
;
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v28f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -17882,7 +18019,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -17894,15 +18031,14 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -17914,10 +18050,11 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -17937,10 +18074,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -17967,9 +18103,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -17984,7 +18120,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB31_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-TRUE16-NEXT: s_branch .LBB31_3
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v28f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -18012,7 +18150,7 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -18024,15 +18162,14 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -18044,10 +18181,11 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -18067,10 +18205,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-FAKE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -18097,9 +18234,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -18114,7 +18251,9 @@ define inreg <28 x float> @bitcast_v56i16_to_v28f32_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB31_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB31_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-FAKE16-NEXT: s_branch .LBB31_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -19344,6 +19483,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s43, v1
; SI-NEXT: v_readfirstlane_b32 s42, v2
; SI-NEXT: v_readfirstlane_b32 s41, v3
@@ -19354,11 +19494,11 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v8
; SI-NEXT: v_readfirstlane_b32 s11, v9
; SI-NEXT: v_readfirstlane_b32 s10, v10
-; SI-NEXT: v_readfirstlane_b32 s8, v11
-; SI-NEXT: v_readfirstlane_b32 s7, v12
-; SI-NEXT: v_readfirstlane_b32 s6, v13
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v14
+; SI-NEXT: v_readfirstlane_b32 s9, v11
+; SI-NEXT: v_readfirstlane_b32 s8, v12
+; SI-NEXT: v_readfirstlane_b32 s7, v13
+; SI-NEXT: v_readfirstlane_b32 s6, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -19377,13 +19517,13 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v40, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v41, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v40, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v43, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v41, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v43, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v45, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v47, s4
@@ -19433,12 +19573,12 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v46, s4
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: v_cvt_f32_f16_e32 v56, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v38, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v52, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v38, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v52, s7
; SI-NEXT: s_waitcnt expcnt(6)
-; SI-NEXT: v_cvt_f32_f16_e32 v57, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v57, s8
; SI-NEXT: s_waitcnt expcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v58, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v58, s9
; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_cvt_f32_f16_e32 v59, s10
; SI-NEXT: s_waitcnt expcnt(3)
@@ -19470,14 +19610,14 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v1, s16, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v2, 16, v1
; SI-NEXT: v_add_f32_e64 v14, s11, 1.0
-; SI-NEXT: v_add_f32_e64 v36, s6, 1.0
+; SI-NEXT: v_add_f32_e64 v36, s7, 1.0
; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: v_lshrrev_b32_e32 v6, 16, v14
; SI-NEXT: v_lshrrev_b32_e32 v54, 16, v36
; SI-NEXT: v_cvt_f32_f16_e32 v41, v54
; SI-NEXT: v_cvt_f32_f16_e32 v54, v6
; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e64 v10, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v10, s9, 1.0
; SI-NEXT: v_add_f32_e64 v26, s29, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v31, 16, v10
; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v26
@@ -19486,7 +19626,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_cvt_f32_f16_e32 v10, v26
; SI-NEXT: v_cvt_f32_f16_e32 v26, v31
; SI-NEXT: v_add_f32_e64 v12, s10, 1.0
-; SI-NEXT: v_add_f32_e64 v33, s7, 1.0
+; SI-NEXT: v_add_f32_e64 v33, s8, 1.0
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_add_f32_e64 v2, s17, 1.0
; SI-NEXT: v_add_f32_e64 v3, s18, 1.0
@@ -19510,7 +19650,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v16, s12, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v12
; SI-NEXT: v_lshrrev_b32_e32 v34, 16, v33
-; SI-NEXT: v_add_f32_e64 v48, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v48, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v56, 16, v2
; SI-NEXT: v_lshrrev_b32_e32 v46, 16, v3
; SI-NEXT: v_lshrrev_b32_e32 v44, 16, v5
@@ -19857,26 +19997,29 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr41
; SI-NEXT: ; implicit-def: $vgpr38
; SI-NEXT: ; implicit-def: $vgpr40
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v28f32_to_v56f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; VI-NEXT: v_mov_b32_e32 v20, s16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v19, s16
; VI-NEXT: v_mov_b32_e32 v18, s17
; VI-NEXT: v_mov_b32_e32 v17, s18
; VI-NEXT: v_mov_b32_e32 v16, s19
; VI-NEXT: v_mov_b32_e32 v15, s20
-; VI-NEXT: v_mov_b32_e32 v26, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v25, s21
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v27, s22
-; VI-NEXT: v_mov_b32_e32 v25, s23
+; VI-NEXT: v_mov_b32_e32 v26, s23
; VI-NEXT: v_mov_b32_e32 v24, s24
; VI-NEXT: v_mov_b32_e32 v23, s25
; VI-NEXT: v_mov_b32_e32 v22, s26
; VI-NEXT: v_mov_b32_e32 v21, s27
-; VI-NEXT: v_mov_b32_e32 v19, s28
+; VI-NEXT: v_mov_b32_e32 v20, s28
; VI-NEXT: v_mov_b32_e32 v14, s29
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -19903,19 +20046,19 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; VI-NEXT: s_cbranch_execnz .LBB33_3
; VI-NEXT: .LBB33_2: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -19933,19 +20076,19 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
; VI-NEXT: v_add_f32_e32 v14, 1.0, v14
-; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
+; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
; VI-NEXT: v_add_f32_e32 v21, 1.0, v21
; VI-NEXT: v_add_f32_e32 v22, 1.0, v22
; VI-NEXT: v_add_f32_e32 v23, 1.0, v23
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
-; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
-; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
; VI-NEXT: v_add_f32_e32 v26, 1.0, v26
+; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
+; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
; VI-NEXT: v_add_f32_e32 v15, 1.0, v15
; VI-NEXT: v_add_f32_e32 v16, 1.0, v16
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
-; VI-NEXT: v_add_f32_e32 v20, 1.0, v20
+; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
; VI-NEXT: v_lshrrev_b32_e32 v50, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v51, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v52, 16, v11
@@ -19961,24 +20104,24 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; VI-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; VI-NEXT: .LBB33_3: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
-; VI-NEXT: v_or_b32_sdwa v28, v20, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
-; VI-NEXT: v_lshlrev_b32_e32 v20, 16, v29
-; VI-NEXT: v_or_b32_sdwa v29, v18, v20 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v28, v19, v28 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_lshlrev_b32_e32 v19, 16, v29
+; VI-NEXT: v_or_b32_sdwa v29, v18, v19 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v30
; VI-NEXT: v_or_b32_sdwa v30, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v31
@@ -19986,11 +20129,11 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v16, 16, v32
; VI-NEXT: v_or_b32_sdwa v32, v15, v16 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v33
-; VI-NEXT: v_or_b32_sdwa v33, v26, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v33, v25, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v34
; VI-NEXT: v_or_b32_sdwa v34, v27, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v35
-; VI-NEXT: v_or_b32_sdwa v35, v25, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v35, v26, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v36
; VI-NEXT: v_or_b32_sdwa v36, v24, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v37
@@ -20000,7 +20143,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v39
; VI-NEXT: v_or_b32_sdwa v39, v21, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v48
-; VI-NEXT: v_or_b32_sdwa v48, v19, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v48, v20, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v15, 16, v49
; VI-NEXT: v_or_b32_sdwa v49, v14, v15 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v14, 16, v47
@@ -20084,26 +20227,29 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr52
; VI-NEXT: ; implicit-def: $vgpr51
; VI-NEXT: ; implicit-def: $vgpr50
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
+; VI-NEXT: s_branch .LBB33_3
;
; GFX9-LABEL: bitcast_v28f32_to_v56f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; GFX9-NEXT: v_mov_b32_e32 v20, s16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v19, s16
; GFX9-NEXT: v_mov_b32_e32 v18, s17
; GFX9-NEXT: v_mov_b32_e32 v17, s18
; GFX9-NEXT: v_mov_b32_e32 v16, s19
; GFX9-NEXT: v_mov_b32_e32 v15, s20
-; GFX9-NEXT: v_mov_b32_e32 v26, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v25, s21
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v27, s22
-; GFX9-NEXT: v_mov_b32_e32 v25, s23
+; GFX9-NEXT: v_mov_b32_e32 v26, s23
; GFX9-NEXT: v_mov_b32_e32 v24, s24
; GFX9-NEXT: v_mov_b32_e32 v23, s25
; GFX9-NEXT: v_mov_b32_e32 v22, s26
; GFX9-NEXT: v_mov_b32_e32 v21, s27
-; GFX9-NEXT: v_mov_b32_e32 v19, s28
+; GFX9-NEXT: v_mov_b32_e32 v20, s28
; GFX9-NEXT: v_mov_b32_e32 v14, s29
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -20130,19 +20276,19 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; GFX9-NEXT: s_cbranch_execnz .LBB33_3
; GFX9-NEXT: .LBB33_2: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v13, 1.0, v13
@@ -20160,19 +20306,19 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
; GFX9-NEXT: v_add_f32_e32 v14, 1.0, v14
-; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
+; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
; GFX9-NEXT: v_add_f32_e32 v21, 1.0, v21
; GFX9-NEXT: v_add_f32_e32 v22, 1.0, v22
; GFX9-NEXT: v_add_f32_e32 v23, 1.0, v23
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
-; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
-; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
; GFX9-NEXT: v_add_f32_e32 v26, 1.0, v26
+; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
+; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
; GFX9-NEXT: v_add_f32_e32 v15, 1.0, v15
; GFX9-NEXT: v_add_f32_e32 v16, 1.0, v16
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
-; GFX9-NEXT: v_add_f32_e32 v20, 1.0, v20
+; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
; GFX9-NEXT: v_lshrrev_b32_e32 v50, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v51, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v52, 16, v11
@@ -20188,27 +20334,27 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v1
; GFX9-NEXT: v_lshrrev_b32_e32 v47, 16, v0
; GFX9-NEXT: v_lshrrev_b32_e32 v49, 16, v14
-; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v19
+; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v20
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v21
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v22
; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v23
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v24
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v25
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v26
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v27
-; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v26
+; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v31, 16, v16
; GFX9-NEXT: v_lshrrev_b32_e32 v30, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v18
-; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v20
+; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v19
; GFX9-NEXT: .LBB33_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX9-NEXT: v_lshl_or_b32 v32, v32, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v26
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v25
; GFX9-NEXT: v_lshl_or_b32 v33, v33, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v27
; GFX9-NEXT: v_lshl_or_b32 v34, v34, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v25
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v26
; GFX9-NEXT: v_lshl_or_b32 v35, v35, 16, v15
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v24
; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v15
@@ -20220,7 +20366,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX9-NEXT: v_lshl_or_b32 v39, v39, 16, v15
-; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v19
+; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v20
; GFX9-NEXT: v_lshl_or_b32 v49, v49, 16, v14
; GFX9-NEXT: v_lshl_or_b32 v14, v47, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v1
@@ -20236,13 +20382,13 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: v_lshl_or_b32 v30, v30, 16, v17
; GFX9-NEXT: v_lshl_or_b32 v17, v44, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v4
+; GFX9-NEXT: v_and_b32_e32 v19, 0xffff, v19
; GFX9-NEXT: v_lshl_or_b32 v29, v29, 16, v18
; GFX9-NEXT: v_lshl_or_b32 v18, v43, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v5
-; GFX9-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v19
; GFX9-NEXT: v_lshl_or_b32 v19, v42, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v6
-; GFX9-NEXT: v_lshl_or_b32 v28, v28, 16, v20
; GFX9-NEXT: v_lshl_or_b32 v20, v41, 16, v0
; GFX9-NEXT: v_and_b32_e32 v0, 0xffff, v7
; GFX9-NEXT: v_lshl_or_b32 v21, v40, 16, v0
@@ -20311,7 +20457,9 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr52
; GFX9-NEXT: ; implicit-def: $vgpr51
; GFX9-NEXT: ; implicit-def: $vgpr50
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
+; GFX9-NEXT: s_branch .LBB33_3
;
; GFX11-LABEL: bitcast_v28f32_to_v56f16_scalar:
; GFX11: ; %bb.0:
@@ -20323,11 +20471,11 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s18 :: v_dual_mov_b32 v21, s19
; GFX11-NEXT: v_dual_mov_b32 v20, s20 :: v_dual_mov_b32 v19, s21
; GFX11-NEXT: v_dual_mov_b32 v18, s22 :: v_dual_mov_b32 v11, s24
-; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v15, s25
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v13, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v12, s23 :: v_dual_mov_b32 v13, s26
+; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -20340,11 +20488,11 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
@@ -20358,17 +20506,16 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v26
; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v27
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v28
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-NEXT: s_cbranch_execnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
; GFX11-NEXT: v_dual_add_f32 v7, 1.0, v7 :: v_dual_add_f32 v6, 1.0, v6
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v14, 1.0, v14
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v12, 1.0, v12
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v12, 1.0, v12
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
@@ -20385,11 +20532,11 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v14
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v18
@@ -20412,7 +20559,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v19
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v27
; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
@@ -20426,9 +20573,9 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v11
; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v18
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v19
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
@@ -20439,9 +20586,9 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v26
; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v21
-; GFX11-NEXT: v_and_b32_e32 v14, 0xffff, v14
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v18
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
+; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
@@ -20457,7 +20604,7 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v22
; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v14
+; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v23, v50, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v25, v48, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v26, v39, 16, v3
@@ -20497,7 +20644,9 @@ define inreg <56 x half> @bitcast_v28f32_to_v56f16_scalar(<28 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr48
; GFX11-NEXT: ; implicit-def: $vgpr39
; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
+; GFX11-NEXT: s_branch .LBB33_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -21987,56 +22136,53 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:44
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v44, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v46, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v2
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v3
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v46, v6
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v38, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v8
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v16
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v22
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v27
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v30
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
@@ -22045,32 +22191,26 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v51
+; SI-NEXT: v_cvt_f16_f32_e32 v29, v52
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v51, s16
; SI-NEXT: v_cvt_f16_f32_e32 v53, s18
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: v_cvt_f16_f32_e32 v54, s19
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v55, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
@@ -22085,120 +22225,119 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v9, v4
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
+; SI-NEXT: v_or_b32_e32 v9, v59, v9
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v29
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_or_b32_e32 v5, v8, v5
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v16, v18, v16
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v49, v2
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_mov_b32_e32 v61, v44
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
-; SI-NEXT: v_mov_b32_e32 v39, v11
; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_mov_b32_e32 v33, v10
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v61
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v62
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v57
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v32
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_or_b32_e32 v8, v61, v8
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v34
+; SI-NEXT: v_mov_b32_e32 v61, v45
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v0, v55, v0
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v31
+; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_or_b32_e32 v1, v53, v1
-; SI-NEXT: v_or_b32_e32 v7, v47, v7
-; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_or_b32_e32 v8, v58, v8
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_or_b32_e32 v9, v59, v9
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_or_b32_e32 v11, v45, v11
-; SI-NEXT: v_or_b32_e32 v12, v38, v12
-; SI-NEXT: v_or_b32_e32 v13, v36, v13
-; SI-NEXT: v_or_b32_e32 v14, v35, v14
-; SI-NEXT: v_or_b32_e32 v15, v32, v15
-; SI-NEXT: v_or_b32_e32 v17, v37, v17
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_or_b32_e32 v11, v57, v11
+; SI-NEXT: v_or_b32_e32 v13, v47, v13
+; SI-NEXT: v_or_b32_e32 v15, v62, v15
+; SI-NEXT: v_or_b32_e32 v17, v63, v17
+; SI-NEXT: v_or_b32_e32 v19, v39, v19
+; SI-NEXT: v_or_b32_e32 v20, v37, v20
+; SI-NEXT: v_or_b32_e32 v21, v35, v21
+; SI-NEXT: v_or_b32_e32 v22, v33, v22
+; SI-NEXT: v_or_b32_e32 v23, v30, v23
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_or_b32_e32 v18, v19, v18
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v20, v19
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_or_b32_e32 v20, v21, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
-; SI-NEXT: v_or_b32_e32 v21, v22, v21
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v23, v22
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: v_or_b32_e32 v23, v24, v23
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v59
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
+; SI-NEXT: v_or_b32_e32 v10, v46, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v58
+; SI-NEXT: v_or_b32_e32 v14, v61, v14
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_or_b32_e32 v12, v60, v12
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v7, v44, v7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v49, v18
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v50, v27
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: v_mov_b32_e32 v62, v44
; SI-NEXT: s_cbranch_execnz .LBB35_3
; SI-NEXT: .LBB35_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v51
; SI-NEXT: v_cvt_f32_f16_e32 v3, v53
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -22212,173 +22351,186 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v38
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v36
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v32
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v50
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v29
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v49
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v39
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v35
+; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v33
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v29
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v29, v29
-; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v3
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v5, v4
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v32
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v42
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v59
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v55
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v28
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v58
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
@@ -22392,59 +22544,38 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v18, v16
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v63
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_or_b32_e32 v17, v18, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
-; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v48
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v38
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v37
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v34
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v31
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v30
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_or_b32_e32 v23, v24, v23
@@ -22482,7 +22613,6 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -22498,20 +22628,48 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_4:
-; SI-NEXT: v_mov_b32_e32 v39, v11
-; SI-NEXT: v_mov_b32_e32 v33, v10
-; SI-NEXT: v_mov_b32_e32 v49, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
-; SI-NEXT: v_mov_b32_e32 v52, v37
-; SI-NEXT: v_mov_b32_e32 v37, v29
+; SI-NEXT: v_mov_b32_e32 v52, v63
+; SI-NEXT: v_mov_b32_e32 v63, v29
+; SI-NEXT: v_mov_b32_e32 v50, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v39
+; SI-NEXT: v_mov_b32_e32 v39, v38
+; SI-NEXT: v_mov_b32_e32 v38, v37
+; SI-NEXT: v_mov_b32_e32 v37, v36
+; SI-NEXT: v_mov_b32_e32 v36, v35
+; SI-NEXT: v_mov_b32_e32 v35, v34
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_mov_b32_e32 v33, v31
+; SI-NEXT: v_mov_b32_e32 v32, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_mov_b32_e32 v29, v37
-; SI-NEXT: v_mov_b32_e32 v37, v52
-; SI-NEXT: v_mov_b32_e32 v61, v44
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: v_mov_b32_e32 v31, v33
+; SI-NEXT: v_mov_b32_e32 v33, v34
+; SI-NEXT: v_mov_b32_e32 v34, v35
+; SI-NEXT: v_mov_b32_e32 v35, v36
+; SI-NEXT: v_mov_b32_e32 v36, v37
+; SI-NEXT: v_mov_b32_e32 v37, v38
+; SI-NEXT: v_mov_b32_e32 v38, v39
+; SI-NEXT: v_mov_b32_e32 v39, v48
+; SI-NEXT: v_mov_b32_e32 v48, v49
+; SI-NEXT: v_mov_b32_e32 v49, v50
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_mov_b32_e32 v30, v32
+; SI-NEXT: v_mov_b32_e32 v29, v63
+; SI-NEXT: v_mov_b32_e32 v63, v52
+; SI-NEXT: v_mov_b32_e32 v61, v45
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v56f16_to_v28f32_scalar:
; VI: ; %bb.0:
@@ -22531,6 +22689,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -22545,7 +22704,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB35_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -22739,25 +22898,13 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v56f16_to_v28f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -22773,6 +22920,21 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -22791,7 +22953,6 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -22806,6 +22967,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -22935,7 +23097,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v28f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -22973,7 +23137,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -22985,15 +23149,14 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -23005,10 +23168,11 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -23028,10 +23192,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -23058,9 +23221,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -23075,7 +23238,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-TRUE16-NEXT: s_branch .LBB35_3
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v28f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -23103,7 +23268,7 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -23115,15 +23280,14 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -23135,10 +23299,11 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -23158,10 +23323,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -23188,9 +23352,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -23205,7 +23369,9 @@ define inreg <28 x float> @bitcast_v56f16_to_v28f32_scalar(<56 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB35_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-FAKE16-NEXT: s_branch .LBB35_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23417,6 +23583,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -23437,7 +23604,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v3, s19
; SI-NEXT: v_mov_b32_e32 v4, s20
; SI-NEXT: v_mov_b32_e32 v5, s21
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v6, s22
; SI-NEXT: v_mov_b32_e32 v7, s23
; SI-NEXT: v_mov_b32_e32 v8, s24
@@ -23446,10 +23613,13 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -23478,16 +23648,15 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
; SI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v14i64_to_v14f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -23508,7 +23677,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v3, s19
; VI-NEXT: v_mov_b32_e32 v4, s20
; VI-NEXT: v_mov_b32_e32 v5, s21
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v6, s22
; VI-NEXT: v_mov_b32_e32 v7, s23
; VI-NEXT: v_mov_b32_e32 v8, s24
@@ -23517,10 +23686,13 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -23549,16 +23721,15 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v25, vcc, 0, v25, vcc
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
; VI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v14i64_to_v14f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -23579,7 +23750,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v3, s19
; GFX9-NEXT: v_mov_b32_e32 v4, s20
; GFX9-NEXT: v_mov_b32_e32 v5, s21
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v6, s22
; GFX9-NEXT: v_mov_b32_e32 v7, s23
; GFX9-NEXT: v_mov_b32_e32 v8, s24
@@ -23588,10 +23759,13 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -23620,42 +23794,40 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v25, vcc, 0, v25, vcc
; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, 3, v26
; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v27, vcc
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v14i64_to_v14f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB37_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: .LBB37_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -23691,6 +23863,7 @@ define inreg <14 x double> @bitcast_v14i64_to_v14f64_scalar(<14 x i64> inreg %a,
; GFX11-NEXT: v_add_co_ci_u32_e64 v25, null, 0, v25, vcc_lo
; GFX11-NEXT: v_add_co_u32 v26, vcc_lo, v26, 3
; GFX11-NEXT: v_add_co_ci_u32_e64 v27, null, 0, v27, vcc_lo
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23840,6 +24013,7 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, v13
; SI-NEXT: v_mov_b32_e32 v26, v12
; SI-NEXT: v_mov_b32_e32 v25, v11
@@ -23866,13 +24040,16 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -23887,17 +24064,16 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; SI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_mov_b32_e32 v14, v28
; SI-NEXT: v_mov_b32_e32 v15, v29
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v14f64_to_v14i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v27, v13
; VI-NEXT: v_mov_b32_e32 v26, v12
; VI-NEXT: v_mov_b32_e32 v25, v11
@@ -23924,13 +24100,16 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -23945,17 +24124,16 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; VI-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v14, v28
; VI-NEXT: v_mov_b32_e32 v15, v29
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v14f64_to_v14i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v27, v13
; GFX9-NEXT: v_mov_b32_e32 v26, v12
; GFX9-NEXT: v_mov_b32_e32 v25, v11
@@ -23982,13 +24160,16 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -24003,43 +24184,41 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v14, v28
; GFX9-NEXT: v_mov_b32_e32 v15, v29
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v14f64_to_v14i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
+; GFX11-NEXT: v_dual_mov_b32 v15, v10 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB39_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: .LBB39_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -24054,6 +24233,7 @@ define inreg <14 x i64> @bitcast_v14f64_to_v14i64_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[22:23], v[22:23], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25009,6 +25189,7 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s43, v1
; SI-NEXT: v_readfirstlane_b32 s42, v2
; SI-NEXT: v_readfirstlane_b32 s41, v3
@@ -25022,8 +25203,8 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v11
; SI-NEXT: v_readfirstlane_b32 s8, v12
; SI-NEXT: v_readfirstlane_b32 s7, v13
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -25339,7 +25520,9 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr45
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr44
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v14i64_to_v56i16_scalar:
; VI: ; %bb.0:
@@ -25348,8 +25531,9 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: v_writelane_b32 v28, s30, 0
-; VI-NEXT: v_writelane_b32 v28, s31, 1
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: v_writelane_b32 v28, s31, 1
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v28, s34, 2
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_readfirstlane_b32 s42, v1
@@ -25363,14 +25547,14 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v9
; VI-NEXT: v_readfirstlane_b32 s9, v10
; VI-NEXT: v_readfirstlane_b32 s8, v11
-; VI-NEXT: v_readfirstlane_b32 s6, v12
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v13
+; VI-NEXT: v_readfirstlane_b32 s7, v12
+; VI-NEXT: v_readfirstlane_b32 s6, v13
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v28, s35, 3
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -25399,8 +25583,8 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: s_lshr_b32 s35, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -25427,8 +25611,8 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -25534,12 +25718,12 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s42, s46, 16
; VI-NEXT: s_or_b32 s8, s8, s42
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s42, s45, 16
-; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_lshl_b32 s42, s45, 16
; VI-NEXT: s_or_b32 s7, s7, s42
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -25566,8 +25750,8 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v23, s10
; VI-NEXT: v_mov_b32_e32 v24, s9
; VI-NEXT: v_mov_b32_e32 v25, s8
-; VI-NEXT: v_mov_b32_e32 v26, s6
-; VI-NEXT: v_mov_b32_e32 v27, s7
+; VI-NEXT: v_mov_b32_e32 v26, s7
+; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: v_readlane_b32 s35, v28, 3
; VI-NEXT: v_readlane_b32 s34, v28, 2
; VI-NEXT: v_readlane_b32 s31, v28, 1
@@ -25606,43 +25790,46 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; implicit-def: $sgpr45
; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v14i64_to_v56i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s6, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -25659,20 +25846,20 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s42, s42, 3
-; GFX9-NEXT: s_addc_u32 s43, s43, 0
-; GFX9-NEXT: s_add_u32 s40, s40, 3
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s43, s43, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s41, s41, 3
+; GFX9-NEXT: s_addc_u32 s42, s42, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s40, s40, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -25687,20 +25874,20 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -25730,20 +25917,20 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s44
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -25758,20 +25945,20 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: ; implicit-def: $sgpr95
@@ -25802,7 +25989,9 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr45
; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v14i64_to_v56i16_scalar:
; GFX11: ; %bb.0:
@@ -25815,16 +26004,16 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
; GFX11-NEXT: v_readfirstlane_b32 s9, v5
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
-; GFX11-NEXT: v_readfirstlane_b32 s11, v7
+; GFX11-NEXT: v_readfirstlane_b32 s12, v7
; GFX11-NEXT: v_readfirstlane_b32 s13, v8
-; GFX11-NEXT: v_readfirstlane_b32 s12, v9
-; GFX11-NEXT: s_mov_b32 s90, 0
+; GFX11-NEXT: v_readfirstlane_b32 s11, v9
+; GFX11-NEXT: s_mov_b32 s90, -1
; GFX11-NEXT: s_and_b32 s14, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -25850,13 +26039,12 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s79, s2, 16
; GFX11-NEXT: s_lshr_b32 s88, s1, 16
; GFX11-NEXT: s_lshr_b32 s89, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s13, s13, 3
-; GFX11-NEXT: s_addc_u32 s12, s12, 0
-; GFX11-NEXT: s_add_u32 s10, s10, 3
; GFX11-NEXT: s_addc_u32 s11, s11, 0
+; GFX11-NEXT: s_add_u32 s10, s10, 3
+; GFX11-NEXT: s_addc_u32 s12, s12, 0
; GFX11-NEXT: s_add_u32 s8, s8, 3
; GFX11-NEXT: s_addc_u32 s9, s9, 0
; GFX11-NEXT: s_add_u32 s6, s6, 3
@@ -25881,9 +26069,9 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -25936,9 +26124,9 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s43
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s14
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -25951,8 +26139,8 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s12
+; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s11
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: ; implicit-def: $sgpr89
@@ -25983,7 +26171,9 @@ define inreg <56 x i16> @bitcast_v14i64_to_v56i16_scalar(<14 x i64> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -27297,6 +27487,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v36, v18
; SI-NEXT: v_mov_b32_e32 v37, v16
; SI-NEXT: v_mov_b32_e32 v38, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5
@@ -27323,7 +27514,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6
; SI-NEXT: s_waitcnt vmcnt(11)
@@ -27637,7 +27828,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v45, v56
; SI-NEXT: v_mov_b32_e32 v56, v59
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v56i16_to_v14i64_scalar:
; VI: ; %bb.0:
@@ -27657,6 +27850,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -27671,7 +27865,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -27906,25 +28100,13 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v56i16_to_v14i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -27940,6 +28122,21 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -27958,7 +28155,6 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -27973,6 +28169,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -28100,7 +28297,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -28138,7 +28337,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -28150,15 +28349,14 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -28170,10 +28368,11 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -28193,10 +28392,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -28223,9 +28421,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -28240,7 +28438,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-TRUE16-NEXT: s_branch .LBB43_3
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -28268,7 +28468,7 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -28280,15 +28480,14 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -28300,10 +28499,11 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -28323,10 +28523,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -28353,9 +28552,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -28370,7 +28569,9 @@ define inreg <14 x i64> @bitcast_v56i16_to_v14i64_scalar(<56 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB43_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB43_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-FAKE16-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29642,6 +29843,7 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: v_readfirstlane_b32 s43, v2
; SI-NEXT: v_readfirstlane_b32 s40, v3
@@ -29652,11 +29854,11 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s13, v8
; SI-NEXT: v_readfirstlane_b32 s10, v9
; SI-NEXT: v_readfirstlane_b32 s11, v10
-; SI-NEXT: v_readfirstlane_b32 s7, v11
-; SI-NEXT: v_readfirstlane_b32 s8, v12
+; SI-NEXT: v_readfirstlane_b32 s8, v11
+; SI-NEXT: v_readfirstlane_b32 s9, v12
; SI-NEXT: v_readfirstlane_b32 s6, v13
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v14
+; SI-NEXT: v_readfirstlane_b32 s7, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -29668,13 +29870,13 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI-NEXT: buffer_store_dword v56, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s11, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -29728,10 +29930,10 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v56, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v14, s10
; SI-NEXT: v_cvt_f32_f16_e32 v16, s13
@@ -29806,18 +30008,18 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI-NEXT: s_addc_u32 s11, s11, 0
; SI-NEXT: s_lshr_b32 s88, s10, 16
; SI-NEXT: s_lshr_b32 s89, s11, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s90, s7, 16
-; SI-NEXT: s_lshr_b32 s91, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: s_lshr_b32 s90, s8, 16
+; SI-NEXT: s_lshr_b32 s91, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: s_lshr_b32 s92, s6, 16
-; SI-NEXT: s_lshr_b32 s93, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: s_lshr_b32 s93, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v14, s10
; SI-NEXT: v_cvt_f32_f16_e32 v16, s13
@@ -30138,7 +30340,9 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v14i64_to_v56f16_scalar:
; VI: ; %bb.0:
@@ -30147,8 +30351,9 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: buffer_store_dword v28, off, s[0:3], s32 ; 4-byte Folded Spill
; VI-NEXT: s_mov_b64 exec, s[4:5]
; VI-NEXT: v_writelane_b32 v28, s30, 0
-; VI-NEXT: v_writelane_b32 v28, s31, 1
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: v_writelane_b32 v28, s31, 1
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v28, s34, 2
; VI-NEXT: v_readfirstlane_b32 s43, v0
; VI-NEXT: v_readfirstlane_b32 s42, v1
@@ -30162,14 +30367,14 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v9
; VI-NEXT: v_readfirstlane_b32 s9, v10
; VI-NEXT: v_readfirstlane_b32 s8, v11
-; VI-NEXT: v_readfirstlane_b32 s6, v12
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v13
+; VI-NEXT: v_readfirstlane_b32 s7, v12
+; VI-NEXT: v_readfirstlane_b32 s6, v13
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v28, s35, 3
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -30198,8 +30403,8 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: s_lshr_b32 s35, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -30226,8 +30431,8 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s44, s7, 16
-; VI-NEXT: s_lshr_b32 s45, s6, 16
+; VI-NEXT: s_lshr_b32 s44, s6, 16
+; VI-NEXT: s_lshr_b32 s45, s7, 16
; VI-NEXT: s_lshr_b32 s46, s8, 16
; VI-NEXT: s_lshr_b32 s47, s9, 16
; VI-NEXT: s_lshr_b32 s56, s10, 16
@@ -30333,12 +30538,12 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s42, s46, 16
; VI-NEXT: s_or_b32 s8, s8, s42
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s42, s45, 16
-; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_lshl_b32 s42, s45, 16
; VI-NEXT: s_or_b32 s7, s7, s42
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s42, s44, 16
+; VI-NEXT: s_or_b32 s6, s6, s42
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -30365,8 +30570,8 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v23, s10
; VI-NEXT: v_mov_b32_e32 v24, s9
; VI-NEXT: v_mov_b32_e32 v25, s8
-; VI-NEXT: v_mov_b32_e32 v26, s6
-; VI-NEXT: v_mov_b32_e32 v27, s7
+; VI-NEXT: v_mov_b32_e32 v26, s7
+; VI-NEXT: v_mov_b32_e32 v27, s6
; VI-NEXT: v_readlane_b32 s35, v28, 3
; VI-NEXT: v_readlane_b32 s34, v28, 2
; VI-NEXT: v_readlane_b32 s31, v28, 1
@@ -30405,43 +30610,46 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr46
; VI-NEXT: ; implicit-def: $sgpr45
; VI-NEXT: ; implicit-def: $sgpr44
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v14i64_to_v56f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s6, v13
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -30458,20 +30666,20 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s95, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s42, s42, 3
-; GFX9-NEXT: s_addc_u32 s43, s43, 0
-; GFX9-NEXT: s_add_u32 s40, s40, 3
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s43, s43, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s41, s41, 3
+; GFX9-NEXT: s_addc_u32 s42, s42, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s40, s40, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -30486,20 +30694,20 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s44, s43, 16
-; GFX9-NEXT: s_lshr_b32 s45, s42, 16
-; GFX9-NEXT: s_lshr_b32 s46, s41, 16
-; GFX9-NEXT: s_lshr_b32 s47, s40, 16
-; GFX9-NEXT: s_lshr_b32 s56, s15, 16
-; GFX9-NEXT: s_lshr_b32 s57, s14, 16
-; GFX9-NEXT: s_lshr_b32 s58, s13, 16
-; GFX9-NEXT: s_lshr_b32 s59, s12, 16
-; GFX9-NEXT: s_lshr_b32 s60, s11, 16
-; GFX9-NEXT: s_lshr_b32 s61, s10, 16
-; GFX9-NEXT: s_lshr_b32 s62, s9, 16
-; GFX9-NEXT: s_lshr_b32 s63, s8, 16
-; GFX9-NEXT: s_lshr_b32 s72, s7, 16
-; GFX9-NEXT: s_lshr_b32 s73, s6, 16
+; GFX9-NEXT: s_lshr_b32 s44, s6, 16
+; GFX9-NEXT: s_lshr_b32 s45, s43, 16
+; GFX9-NEXT: s_lshr_b32 s46, s42, 16
+; GFX9-NEXT: s_lshr_b32 s47, s41, 16
+; GFX9-NEXT: s_lshr_b32 s56, s40, 16
+; GFX9-NEXT: s_lshr_b32 s57, s15, 16
+; GFX9-NEXT: s_lshr_b32 s58, s14, 16
+; GFX9-NEXT: s_lshr_b32 s59, s13, 16
+; GFX9-NEXT: s_lshr_b32 s60, s12, 16
+; GFX9-NEXT: s_lshr_b32 s61, s11, 16
+; GFX9-NEXT: s_lshr_b32 s62, s10, 16
+; GFX9-NEXT: s_lshr_b32 s63, s9, 16
+; GFX9-NEXT: s_lshr_b32 s72, s8, 16
+; GFX9-NEXT: s_lshr_b32 s73, s7, 16
; GFX9-NEXT: s_lshr_b32 s74, s29, 16
; GFX9-NEXT: s_lshr_b32 s75, s28, 16
; GFX9-NEXT: s_lshr_b32 s76, s27, 16
@@ -30529,20 +30737,20 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s76
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s75
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s46
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s45
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s44
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s45
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s44
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -30557,20 +30765,20 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s6
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: ; implicit-def: $sgpr95
@@ -30601,7 +30809,9 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr46
; GFX9-NEXT: ; implicit-def: $sgpr45
; GFX9-NEXT: ; implicit-def: $sgpr44
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v14i64_to_v56f16_scalar:
; GFX11: ; %bb.0:
@@ -30614,16 +30824,16 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s8, v4
; GFX11-NEXT: v_readfirstlane_b32 s9, v5
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
-; GFX11-NEXT: v_readfirstlane_b32 s11, v7
+; GFX11-NEXT: v_readfirstlane_b32 s12, v7
; GFX11-NEXT: v_readfirstlane_b32 s13, v8
-; GFX11-NEXT: v_readfirstlane_b32 s12, v9
-; GFX11-NEXT: s_mov_b32 s90, 0
+; GFX11-NEXT: v_readfirstlane_b32 s11, v9
+; GFX11-NEXT: s_mov_b32 s90, -1
; GFX11-NEXT: s_and_b32 s14, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -30649,13 +30859,12 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s79, s2, 16
; GFX11-NEXT: s_lshr_b32 s88, s1, 16
; GFX11-NEXT: s_lshr_b32 s89, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s13, s13, 3
-; GFX11-NEXT: s_addc_u32 s12, s12, 0
-; GFX11-NEXT: s_add_u32 s10, s10, 3
; GFX11-NEXT: s_addc_u32 s11, s11, 0
+; GFX11-NEXT: s_add_u32 s10, s10, 3
+; GFX11-NEXT: s_addc_u32 s12, s12, 0
; GFX11-NEXT: s_add_u32 s8, s8, 3
; GFX11-NEXT: s_addc_u32 s9, s9, 0
; GFX11-NEXT: s_add_u32 s6, s6, 3
@@ -30680,9 +30889,9 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s14, s12, 16
+; GFX11-NEXT: s_lshr_b32 s14, s11, 16
; GFX11-NEXT: s_lshr_b32 s15, s13, 16
-; GFX11-NEXT: s_lshr_b32 s40, s11, 16
+; GFX11-NEXT: s_lshr_b32 s40, s12, 16
; GFX11-NEXT: s_lshr_b32 s41, s10, 16
; GFX11-NEXT: s_lshr_b32 s42, s9, 16
; GFX11-NEXT: s_lshr_b32 s43, s8, 16
@@ -30735,9 +30944,9 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s8, s8, s43
; GFX11-NEXT: s_pack_ll_b32_b16 s9, s9, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s40
; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s15
-; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s14
+; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s14
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -30750,8 +30959,8 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v18, s4 :: v_dual_mov_b32 v19, s5
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
-; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s12
+; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s12
+; GFX11-NEXT: v_dual_mov_b32 v26, s13 :: v_dual_mov_b32 v27, s11
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: ; implicit-def: $sgpr89
@@ -30782,7 +30991,9 @@ define inreg <56 x half> @bitcast_v14i64_to_v56f16_scalar(<14 x i64> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr40
; GFX11-NEXT: ; implicit-def: $sgpr15
; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s90
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -32272,56 +32483,53 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:44
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v44, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v46, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v2
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v3
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v46, v6
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v38, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v8
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v16
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v22
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v27
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v30
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
@@ -32330,32 +32538,26 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v51
+; SI-NEXT: v_cvt_f16_f32_e32 v29, v52
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v51, s16
; SI-NEXT: v_cvt_f16_f32_e32 v53, s18
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: v_cvt_f16_f32_e32 v54, s19
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v55, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
@@ -32370,120 +32572,119 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v9, v4
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
+; SI-NEXT: v_or_b32_e32 v9, v59, v9
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v29
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_or_b32_e32 v5, v8, v5
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v16, v18, v16
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v49, v2
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_mov_b32_e32 v61, v44
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
-; SI-NEXT: v_mov_b32_e32 v39, v11
; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_mov_b32_e32 v33, v10
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v61
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v62
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v57
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v32
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_or_b32_e32 v8, v61, v8
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v34
+; SI-NEXT: v_mov_b32_e32 v61, v45
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v0, v55, v0
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v31
+; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_or_b32_e32 v1, v53, v1
-; SI-NEXT: v_or_b32_e32 v7, v47, v7
-; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_or_b32_e32 v8, v58, v8
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_or_b32_e32 v9, v59, v9
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_or_b32_e32 v11, v45, v11
-; SI-NEXT: v_or_b32_e32 v12, v38, v12
-; SI-NEXT: v_or_b32_e32 v13, v36, v13
-; SI-NEXT: v_or_b32_e32 v14, v35, v14
-; SI-NEXT: v_or_b32_e32 v15, v32, v15
-; SI-NEXT: v_or_b32_e32 v17, v37, v17
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_or_b32_e32 v11, v57, v11
+; SI-NEXT: v_or_b32_e32 v13, v47, v13
+; SI-NEXT: v_or_b32_e32 v15, v62, v15
+; SI-NEXT: v_or_b32_e32 v17, v63, v17
+; SI-NEXT: v_or_b32_e32 v19, v39, v19
+; SI-NEXT: v_or_b32_e32 v20, v37, v20
+; SI-NEXT: v_or_b32_e32 v21, v35, v21
+; SI-NEXT: v_or_b32_e32 v22, v33, v22
+; SI-NEXT: v_or_b32_e32 v23, v30, v23
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_or_b32_e32 v18, v19, v18
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v20, v19
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_or_b32_e32 v20, v21, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
-; SI-NEXT: v_or_b32_e32 v21, v22, v21
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v23, v22
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: v_or_b32_e32 v23, v24, v23
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v59
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
+; SI-NEXT: v_or_b32_e32 v10, v46, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v58
+; SI-NEXT: v_or_b32_e32 v14, v61, v14
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_or_b32_e32 v12, v60, v12
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v7, v44, v7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v49, v18
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v50, v27
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: v_mov_b32_e32 v62, v44
; SI-NEXT: s_cbranch_execnz .LBB47_3
; SI-NEXT: .LBB47_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v51
; SI-NEXT: v_cvt_f32_f16_e32 v3, v53
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -32497,173 +32698,186 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v38
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v36
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v32
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v50
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v29
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v49
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v39
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v35
+; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v33
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v29
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v29, v29
-; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v3
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v5, v4
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v32
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v42
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v59
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v55
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v28
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v58
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
@@ -32677,59 +32891,38 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v18, v16
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v63
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_or_b32_e32 v17, v18, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
-; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v48
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v38
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v37
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v34
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v31
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v30
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_or_b32_e32 v23, v24, v23
@@ -32767,7 +32960,6 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -32783,20 +32975,48 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB47_4:
-; SI-NEXT: v_mov_b32_e32 v39, v11
-; SI-NEXT: v_mov_b32_e32 v33, v10
-; SI-NEXT: v_mov_b32_e32 v49, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
-; SI-NEXT: v_mov_b32_e32 v52, v37
-; SI-NEXT: v_mov_b32_e32 v37, v29
+; SI-NEXT: v_mov_b32_e32 v52, v63
+; SI-NEXT: v_mov_b32_e32 v63, v29
+; SI-NEXT: v_mov_b32_e32 v50, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v39
+; SI-NEXT: v_mov_b32_e32 v39, v38
+; SI-NEXT: v_mov_b32_e32 v38, v37
+; SI-NEXT: v_mov_b32_e32 v37, v36
+; SI-NEXT: v_mov_b32_e32 v36, v35
+; SI-NEXT: v_mov_b32_e32 v35, v34
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_mov_b32_e32 v33, v31
+; SI-NEXT: v_mov_b32_e32 v32, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_mov_b32_e32 v29, v37
-; SI-NEXT: v_mov_b32_e32 v37, v52
-; SI-NEXT: v_mov_b32_e32 v61, v44
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: v_mov_b32_e32 v31, v33
+; SI-NEXT: v_mov_b32_e32 v33, v34
+; SI-NEXT: v_mov_b32_e32 v34, v35
+; SI-NEXT: v_mov_b32_e32 v35, v36
+; SI-NEXT: v_mov_b32_e32 v36, v37
+; SI-NEXT: v_mov_b32_e32 v37, v38
+; SI-NEXT: v_mov_b32_e32 v38, v39
+; SI-NEXT: v_mov_b32_e32 v39, v48
+; SI-NEXT: v_mov_b32_e32 v48, v49
+; SI-NEXT: v_mov_b32_e32 v49, v50
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_mov_b32_e32 v30, v32
+; SI-NEXT: v_mov_b32_e32 v29, v63
+; SI-NEXT: v_mov_b32_e32 v63, v52
+; SI-NEXT: v_mov_b32_e32 v61, v45
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v56f16_to_v14i64_scalar:
; VI: ; %bb.0:
@@ -32816,6 +33036,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -32830,7 +33051,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -33024,25 +33245,13 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v56f16_to_v14i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -33058,6 +33267,21 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -33076,7 +33300,6 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -33091,6 +33314,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -33220,7 +33444,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
+; GFX9-NEXT: s_branch .LBB47_3
;
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -33258,7 +33484,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -33270,15 +33496,14 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -33290,10 +33515,11 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -33313,10 +33539,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -33343,9 +33568,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -33360,7 +33585,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-TRUE16-NEXT: s_branch .LBB47_3
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -33388,7 +33615,7 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -33400,15 +33627,14 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -33420,10 +33646,11 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -33443,10 +33670,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -33473,9 +33699,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -33490,7 +33716,9 @@ define inreg <14 x i64> @bitcast_v56f16_to_v14i64_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-FAKE16-NEXT: s_branch .LBB47_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34361,6 +34589,7 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, s16
; SI-NEXT: v_mov_b32_e32 v28, s17
; SI-NEXT: v_mov_b32_e32 v23, s18
@@ -34373,9 +34602,9 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v20, s25
; SI-NEXT: v_mov_b32_e32 v17, s26
; SI-NEXT: v_mov_b32_e32 v18, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v15, s28
; SI-NEXT: v_mov_b32_e32 v16, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:32 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
@@ -34679,12 +34908,15 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr37
; SI-NEXT: ; implicit-def: $vgpr29
; SI-NEXT: ; implicit-def: $vgpr35
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v14f64_to_v56i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, s16
; VI-NEXT: v_mov_b32_e32 v20, s17
; VI-NEXT: v_mov_b32_e32 v15, s18
@@ -34697,9 +34929,9 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s25
; VI-NEXT: v_mov_b32_e32 v21, s26
; VI-NEXT: v_mov_b32_e32 v22, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, s28
; VI-NEXT: v_mov_b32_e32 v18, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -34892,12 +35124,15 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr51
; VI-NEXT: ; implicit-def: $vgpr50
; VI-NEXT: ; implicit-def: $vgpr27
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v14f64_to_v56i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, s16
; GFX9-NEXT: v_mov_b32_e32 v20, s17
; GFX9-NEXT: v_mov_b32_e32 v15, s18
@@ -34910,9 +35145,9 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s25
; GFX9-NEXT: v_mov_b32_e32 v21, s26
; GFX9-NEXT: v_mov_b32_e32 v22, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, s28
; GFX9-NEXT: v_mov_b32_e32 v18, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -35105,7 +35340,9 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr51
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: ; implicit-def: $vgpr27
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
+; GFX9-NEXT: s_branch .LBB49_3
;
; GFX11-LABEL: bitcast_v14f64_to_v56i16_scalar:
; GFX11: ; %bb.0:
@@ -35117,11 +35354,11 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
; GFX11-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
+; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v16, s25
; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v18, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -35134,12 +35371,12 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
@@ -35152,17 +35389,16 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-NEXT: s_cbranch_execnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
; GFX11-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
+; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
; GFX11-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
; GFX11-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
@@ -35179,12 +35415,12 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
@@ -35198,56 +35434,54 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
; GFX11-NEXT: .LBB49_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v22
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v19
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v20
; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v21
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-NEXT: v_dual_mov_b32 v9, v29 :: v_dual_and_b32 v4, 0xffff, v9
+; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v15
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v16
; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v13
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
+; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-NEXT: v_dual_mov_b32 v6, v36 :: v_dual_and_b32 v1, 0xffff, v6
; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v23
; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v26
-; GFX11-NEXT: v_mov_b32_e32 v9, v29
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v21
; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
; GFX11-NEXT: v_mov_b32_e32 v4, v34
; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v20
+; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v16
+; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
@@ -35291,7 +35525,9 @@ define inreg <56 x i16> @bitcast_v14f64_to_v56i16_scalar(<14 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr48
; GFX11-NEXT: ; implicit-def: $vgpr39
; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
+; GFX11-NEXT: s_branch .LBB49_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -36605,6 +36841,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v36, v18
; SI-NEXT: v_mov_b32_e32 v37, v16
; SI-NEXT: v_mov_b32_e32 v38, v14
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v5
@@ -36631,7 +36868,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v46, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v45, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_lshlrev_b32_e32 v31, 16, v6
; SI-NEXT: s_waitcnt vmcnt(11)
@@ -36945,7 +37182,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v45, v56
; SI-NEXT: v_mov_b32_e32 v56, v59
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v56i16_to_v14f64_scalar:
; VI: ; %bb.0:
@@ -36965,6 +37204,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -36979,7 +37219,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -37214,25 +37454,13 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v56i16_to_v14f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -37248,6 +37476,21 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -37266,7 +37509,6 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -37281,6 +37523,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -37408,7 +37651,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-TRUE16-LABEL: bitcast_v56i16_to_v14f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -37446,7 +37691,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -37458,15 +37703,14 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -37478,10 +37722,11 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -37501,10 +37746,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -37531,9 +37775,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -37548,7 +37792,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-TRUE16-NEXT: s_branch .LBB51_3
;
; GFX11-FAKE16-LABEL: bitcast_v56i16_to_v14f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -37576,7 +37822,7 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -37588,15 +37834,14 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -37608,10 +37853,11 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -37631,10 +37877,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -37661,9 +37906,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -37678,7 +37923,9 @@ define inreg <14 x double> @bitcast_v56i16_to_v14f64_scalar(<56 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-FAKE16-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -38855,6 +39102,7 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v15
+; SI-NEXT: s_and_b64 s[42:43], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s40, v1
; SI-NEXT: v_readfirstlane_b32 s41, v2
; SI-NEXT: v_readfirstlane_b32 s14, v3
@@ -38868,8 +39116,8 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v11
; SI-NEXT: v_readfirstlane_b32 s7, v12
; SI-NEXT: v_readfirstlane_b32 s4, v13
-; SI-NEXT: s_and_b64 s[42:43], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v14
+; SI-NEXT: s_mov_b64 s[42:43], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -39362,12 +39610,15 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr25
; SI-NEXT: ; implicit-def: $vgpr57
; SI-NEXT: ; implicit-def: $vgpr29
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[42:43]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v14f64_to_v56f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, s16
; VI-NEXT: v_mov_b32_e32 v20, s17
; VI-NEXT: v_mov_b32_e32 v15, s18
@@ -39380,9 +39631,9 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v24, s25
; VI-NEXT: v_mov_b32_e32 v21, s26
; VI-NEXT: v_mov_b32_e32 v22, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, s28
; VI-NEXT: v_mov_b32_e32 v18, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -39575,12 +39826,15 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; VI-NEXT: ; implicit-def: $vgpr51
; VI-NEXT: ; implicit-def: $vgpr50
; VI-NEXT: ; implicit-def: $vgpr27
-; VI-NEXT: s_branch .LBB53_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB53_2
+; VI-NEXT: s_branch .LBB53_3
;
; GFX9-LABEL: bitcast_v14f64_to_v56f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, s16
; GFX9-NEXT: v_mov_b32_e32 v20, s17
; GFX9-NEXT: v_mov_b32_e32 v15, s18
@@ -39593,9 +39847,9 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v24, s25
; GFX9-NEXT: v_mov_b32_e32 v21, s26
; GFX9-NEXT: v_mov_b32_e32 v22, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, s28
; GFX9-NEXT: v_mov_b32_e32 v18, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:28 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:24 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:20 ; 4-byte Folded Spill
@@ -39788,7 +40042,9 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr51
; GFX9-NEXT: ; implicit-def: $vgpr50
; GFX9-NEXT: ; implicit-def: $vgpr27
-; GFX9-NEXT: s_branch .LBB53_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB53_2
+; GFX9-NEXT: s_branch .LBB53_3
;
; GFX11-LABEL: bitcast_v14f64_to_v56f16_scalar:
; GFX11: ; %bb.0:
@@ -39800,11 +40056,11 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v21, s18 :: v_dual_mov_b32 v22, s19
; GFX11-NEXT: v_dual_mov_b32 v19, s20 :: v_dual_mov_b32 v20, s21
; GFX11-NEXT: v_dual_mov_b32 v11, s22 :: v_dual_mov_b32 v12, s23
-; GFX11-NEXT: v_dual_mov_b32 v17, s24 :: v_dual_mov_b32 v18, s25
+; GFX11-NEXT: v_dual_mov_b32 v15, s24 :: v_dual_mov_b32 v16, s25
; GFX11-NEXT: v_dual_mov_b32 v13, s26 :: v_dual_mov_b32 v14, s27
-; GFX11-NEXT: v_dual_mov_b32 v15, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v18, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v9
@@ -39817,12 +40073,12 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
@@ -39835,17 +40091,16 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v32, 16, v25
; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-NEXT: s_cbranch_execnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
; GFX11-NEXT: v_add_f64 v[6:7], v[6:7], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
-; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
; GFX11-NEXT: v_add_f64 v[17:18], v[17:18], 1.0
+; GFX11-NEXT: v_add_f64 v[13:14], v[13:14], 1.0
+; GFX11-NEXT: v_add_f64 v[15:16], v[15:16], 1.0
; GFX11-NEXT: v_add_f64 v[11:12], v[11:12], 1.0
; GFX11-NEXT: v_add_f64 v[19:20], v[19:20], 1.0
; GFX11-NEXT: v_add_f64 v[21:22], v[21:22], 1.0
@@ -39862,12 +40117,12 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v53, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v54, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v55, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v64, 16, v18
+; GFX11-NEXT: v_lshrrev_b32_e32 v65, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v66, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v13
-; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v18
-; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v17
+; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v15
; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v12
; GFX11-NEXT: v_lshrrev_b32_e32 v10, 16, v11
; GFX11-NEXT: v_lshrrev_b32_e32 v29, 16, v20
@@ -39881,56 +40136,54 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v31, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v30, 16, v27
; GFX11-NEXT: .LBB53_3: ; %end
-; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
; GFX11-NEXT: v_and_b32_e32 v22, 0xffff, v22
+; GFX11-NEXT: v_and_b32_e32 v28, 0xffff, v28
; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v19
-; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX11-NEXT: v_and_b32_e32 v11, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX11-NEXT: v_lshl_or_b32 v37, v37, 16, v22
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
+; GFX11-NEXT: v_lshl_or_b32 v31, v31, 16, v28
; GFX11-NEXT: v_lshl_or_b32 v28, v71, 16, v19
; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v21
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
+; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
; GFX11-NEXT: v_lshl_or_b32 v29, v29, 16, v20
; GFX11-NEXT: v_and_b32_e32 v12, 0xffff, v12
+; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v25
; GFX11-NEXT: v_lshl_or_b32 v36, v36, 16, v21
; GFX11-NEXT: v_and_b32_e32 v23, 0xffff, v23
+; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
+; GFX11-NEXT: v_and_b32_e32 v19, 0xffff, v14
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v1
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v4
+; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
+; GFX11-NEXT: v_dual_mov_b32 v9, v29 :: v_dual_and_b32 v4, 0xffff, v9
+; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
+; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v15
+; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v16
; GFX11-NEXT: v_and_b32_e32 v27, 0xffff, v27
; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v25
-; GFX11-NEXT: v_lshl_or_b32 v10, v10, 16, v11
-; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v15
-; GFX11-NEXT: v_and_b32_e32 v21, 0xffff, v16
-; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v13
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v3
+; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v15
+; GFX11-NEXT: v_lshl_or_b32 v15, v66, 16, v19
; GFX11-NEXT: v_lshl_or_b32 v19, v54, 16, v1
-; GFX11-NEXT: v_lshl_or_b32 v22, v51, 16, v4
-; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v6
-; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v9
-; GFX11-NEXT: v_mov_b32_e32 v6, v36
+; GFX11-NEXT: v_dual_mov_b32 v6, v36 :: v_dual_and_b32 v1, 0xffff, v6
; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v23
; GFX11-NEXT: v_and_b32_e32 v25, 0xffff, v26
-; GFX11-NEXT: v_mov_b32_e32 v9, v29
-; GFX11-NEXT: v_lshl_or_b32 v11, v70, 16, v12
-; GFX11-NEXT: v_lshl_or_b32 v12, v69, 16, v17
-; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v18
; GFX11-NEXT: v_lshl_or_b32 v30, v30, 16, v27
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v13
+; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v18
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_and_b32_e32 v2, 0xffff, v2
-; GFX11-NEXT: v_lshl_or_b32 v13, v68, 16, v17
-; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v21
; GFX11-NEXT: v_lshl_or_b32 v21, v52, 16, v3
; GFX11-NEXT: v_and_b32_e32 v3, 0xffff, v8
; GFX11-NEXT: v_lshl_or_b32 v27, v38, 16, v4
; GFX11-NEXT: v_mov_b32_e32 v4, v34
; GFX11-NEXT: v_lshl_or_b32 v33, v33, 16, v25
; GFX11-NEXT: v_and_b32_e32 v24, 0xffff, v24
-; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v18
-; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v20
+; GFX11-NEXT: v_lshl_or_b32 v14, v67, 16, v16
+; GFX11-NEXT: v_lshl_or_b32 v16, v65, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v17, v64, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v55, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v20, v53, 16, v2
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v5
@@ -39974,7 +40227,9 @@ define inreg <56 x half> @bitcast_v14f64_to_v56f16_scalar(<14 x double> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr48
; GFX11-NEXT: ; implicit-def: $vgpr39
; GFX11-NEXT: ; implicit-def: $vgpr38
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB53_2
+; GFX11-NEXT: s_branch .LBB53_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -41464,56 +41719,53 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: buffer_load_dword v41, off, s[0:3], s32 offset:28
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:40
; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:36
-; SI-NEXT: s_waitcnt expcnt(2)
-; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:44
-; SI-NEXT: v_cvt_f16_f32_e32 v47, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:44
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v44, v1
-; SI-NEXT: v_cvt_f16_f32_e32 v46, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v58, v2
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(5)
+; SI-NEXT: v_cvt_f16_f32_e32 v58, v3
+; SI-NEXT: s_waitcnt expcnt(2)
+; SI-NEXT: v_cvt_f16_f32_e32 v61, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v7
; SI-NEXT: v_cvt_f16_f32_e32 v60, v5
; SI-NEXT: v_cvt_f16_f32_e32 v59, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v56, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v57, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v45, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v46, v6
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v38, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v56, v9
+; SI-NEXT: v_cvt_f16_f32_e32 v57, v8
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v15
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v10
+; SI-NEXT: v_cvt_f16_f32_e32 v47, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v45, v14
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v16
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v22
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v39, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v27
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v26
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v33, v30
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
@@ -41522,32 +41774,26 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v31
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v51
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v61
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v52
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v31
+; SI-NEXT: v_cvt_f16_f32_e32 v31, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v51
+; SI-NEXT: v_cvt_f16_f32_e32 v29, v52
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v53
+; SI-NEXT: v_cvt_f16_f32_e32 v51, s16
; SI-NEXT: v_cvt_f16_f32_e32 v53, s18
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(13) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: v_cvt_f16_f32_e32 v54, s19
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v32
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v55
-; SI-NEXT: v_cvt_f16_f32_e32 v55, s16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
@@ -41562,120 +41808,119 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: v_cvt_f16_f32_e32 v43, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:112 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v59, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
+; SI-NEXT: s_waitcnt expcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v9, v4
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
+; SI-NEXT: v_or_b32_e32 v9, v59, v9
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v29
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_or_b32_e32 v5, v8, v5
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v42, v58
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v58
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v16, v18, v16
; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v49, v2
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_mov_b32_e32 v61, v44
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v43
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v54
-; SI-NEXT: v_mov_b32_e32 v39, v11
; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_mov_b32_e32 v33, v10
; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v61
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v46
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v60
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v62
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v57
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v32
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_or_b32_e32 v8, v61, v8
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v56
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v34
+; SI-NEXT: v_mov_b32_e32 v61, v45
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v0, v55, v0
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v36
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v31
+; SI-NEXT: v_or_b32_e32 v0, v51, v0
; SI-NEXT: v_or_b32_e32 v1, v53, v1
-; SI-NEXT: v_or_b32_e32 v7, v47, v7
-; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_or_b32_e32 v8, v58, v8
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_or_b32_e32 v9, v59, v9
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_or_b32_e32 v10, v56, v10
-; SI-NEXT: v_or_b32_e32 v11, v45, v11
-; SI-NEXT: v_or_b32_e32 v12, v38, v12
-; SI-NEXT: v_or_b32_e32 v13, v36, v13
-; SI-NEXT: v_or_b32_e32 v14, v35, v14
-; SI-NEXT: v_or_b32_e32 v15, v32, v15
-; SI-NEXT: v_or_b32_e32 v17, v37, v17
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:120 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_or_b32_e32 v11, v57, v11
+; SI-NEXT: v_or_b32_e32 v13, v47, v13
+; SI-NEXT: v_or_b32_e32 v15, v62, v15
+; SI-NEXT: v_or_b32_e32 v17, v63, v17
+; SI-NEXT: v_or_b32_e32 v19, v39, v19
+; SI-NEXT: v_or_b32_e32 v20, v37, v20
+; SI-NEXT: v_or_b32_e32 v21, v35, v21
+; SI-NEXT: v_or_b32_e32 v22, v33, v22
+; SI-NEXT: v_or_b32_e32 v23, v30, v23
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_or_b32_e32 v18, v19, v18
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v20, v19
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_or_b32_e32 v20, v21, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v29
-; SI-NEXT: v_or_b32_e32 v21, v22, v21
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v23, v22
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: v_or_b32_e32 v23, v24, v23
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v59
+; SI-NEXT: s_waitcnt vmcnt(8)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v28
+; SI-NEXT: v_or_b32_e32 v10, v46, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v58
+; SI-NEXT: v_or_b32_e32 v14, v61, v14
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_or_b32_e32 v12, v60, v12
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:124 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v7, v44, v7
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v49, v18
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v50, v27
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: v_mov_b32_e32 v62, v44
; SI-NEXT: s_cbranch_execnz .LBB55_3
; SI-NEXT: .LBB55_2: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v0, v43
+; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v55
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v51
; SI-NEXT: v_cvt_f32_f16_e32 v3, v53
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
@@ -41689,173 +41934,186 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v0, v1, v0
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v2, v49
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v39
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(2)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v47
-; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: v_cvt_f32_f16_e32 v3, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
-; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v42
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v40
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
-; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
-; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v38
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v36
-; SI-NEXT: v_cvt_f32_f16_e32 v15, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v32
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v50
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v12, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v10, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v29
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v49
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v39
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v35
+; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
+; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v33
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v29
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
-; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
-; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
+; SI-NEXT: s_waitcnt vmcnt(13)
+; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: s_waitcnt vmcnt(12)
-; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
+; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_or_b32_e32 v2, v3, v2
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: s_waitcnt vmcnt(9)
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: s_waitcnt vmcnt(8)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
+; SI-NEXT: s_waitcnt vmcnt(6)
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v10
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v12
+; SI-NEXT: s_waitcnt vmcnt(3)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
+; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v29, v29
-; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
-; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
-; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_add_f32_e32 v29, 0x38000000, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: v_cvt_f16_f32_e32 v29, v29
; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
+; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
+; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
+; SI-NEXT: v_or_b32_e32 v3, v4, v3
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
+; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
+; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
+; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
+; SI-NEXT: v_or_b32_e32 v4, v5, v4
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v61
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v32
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v42
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v41
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v40
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v59
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: buffer_load_dword v11, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v11
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v55
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v28
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:116 ; 4-byte Folded Reload
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v28, v28
+; SI-NEXT: v_add_f32_e32 v28, 0x38000000, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v28, v28
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_or_b32_e32 v13, v14, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v58
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v16
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v15, v17, v15
-; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v17, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
@@ -41869,59 +42127,38 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v18, v16
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v63
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_or_b32_e32 v17, v18, v17
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v19
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
-; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
-; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v48
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v20, v38
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v37
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
; SI-NEXT: v_or_b32_e32 v20, v21, v20
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v22
-; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v34
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v31
; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: v_or_b32_e32 v22, v24, v22
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(1)
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v24, v22
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v30
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
-; SI-NEXT: s_waitcnt vmcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_or_b32_e32 v23, v24, v23
@@ -41959,7 +42196,6 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:52 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v61, off, s[0:3], s32 offset:56 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:60 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:64 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:68 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v57, off, s[0:3], s32 offset:72 ; 4-byte Folded Reload
@@ -41975,20 +42211,48 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB55_4:
-; SI-NEXT: v_mov_b32_e32 v39, v11
-; SI-NEXT: v_mov_b32_e32 v33, v10
-; SI-NEXT: v_mov_b32_e32 v49, v2
-; SI-NEXT: v_mov_b32_e32 v48, v3
-; SI-NEXT: v_mov_b32_e32 v52, v37
-; SI-NEXT: v_mov_b32_e32 v37, v29
+; SI-NEXT: v_mov_b32_e32 v52, v63
+; SI-NEXT: v_mov_b32_e32 v63, v29
+; SI-NEXT: v_mov_b32_e32 v50, v49
+; SI-NEXT: v_mov_b32_e32 v49, v48
+; SI-NEXT: v_mov_b32_e32 v48, v39
+; SI-NEXT: v_mov_b32_e32 v39, v38
+; SI-NEXT: v_mov_b32_e32 v38, v37
+; SI-NEXT: v_mov_b32_e32 v37, v36
+; SI-NEXT: v_mov_b32_e32 v36, v35
+; SI-NEXT: v_mov_b32_e32 v35, v34
+; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_mov_b32_e32 v33, v31
+; SI-NEXT: v_mov_b32_e32 v32, v30
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
; SI-NEXT: v_mov_b32_e32 v42, v58
-; SI-NEXT: v_mov_b32_e32 v41, v60
-; SI-NEXT: v_mov_b32_e32 v40, v56
-; SI-NEXT: v_mov_b32_e32 v29, v37
-; SI-NEXT: v_mov_b32_e32 v37, v52
-; SI-NEXT: v_mov_b32_e32 v61, v44
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: v_mov_b32_e32 v40, v60
+; SI-NEXT: v_mov_b32_e32 v31, v33
+; SI-NEXT: v_mov_b32_e32 v33, v34
+; SI-NEXT: v_mov_b32_e32 v34, v35
+; SI-NEXT: v_mov_b32_e32 v35, v36
+; SI-NEXT: v_mov_b32_e32 v36, v37
+; SI-NEXT: v_mov_b32_e32 v37, v38
+; SI-NEXT: v_mov_b32_e32 v38, v39
+; SI-NEXT: v_mov_b32_e32 v39, v48
+; SI-NEXT: v_mov_b32_e32 v48, v49
+; SI-NEXT: v_mov_b32_e32 v49, v50
+; SI-NEXT: v_mov_b32_e32 v50, v62
+; SI-NEXT: buffer_load_dword v58, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v62, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v41, v61
+; SI-NEXT: v_mov_b32_e32 v55, v56
+; SI-NEXT: v_mov_b32_e32 v30, v32
+; SI-NEXT: v_mov_b32_e32 v29, v63
+; SI-NEXT: v_mov_b32_e32 v63, v52
+; SI-NEXT: v_mov_b32_e32 v61, v45
+; SI-NEXT: v_mov_b32_e32 v32, v44
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v56f16_to_v14f64_scalar:
; VI: ; %bb.0:
@@ -42008,6 +42272,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v13
; VI-NEXT: v_mov_b32_e32 v33, v12
; VI-NEXT: v_mov_b32_e32 v34, v11
@@ -42022,7 +42287,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v51, v2
; VI-NEXT: v_mov_b32_e32 v52, v1
; VI-NEXT: v_mov_b32_e32 v53, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB55_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, 0xffff, s16
@@ -42216,25 +42481,13 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB55_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB55_2
+; VI-NEXT: s_branch .LBB55_3
;
; GFX9-LABEL: bitcast_v56f16_to_v14f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX9-NEXT: v_mov_b32_e32 v32, v13
-; GFX9-NEXT: v_mov_b32_e32 v33, v12
-; GFX9-NEXT: v_mov_b32_e32 v34, v11
-; GFX9-NEXT: v_mov_b32_e32 v35, v10
-; GFX9-NEXT: v_mov_b32_e32 v36, v9
-; GFX9-NEXT: v_mov_b32_e32 v37, v8
-; GFX9-NEXT: v_mov_b32_e32 v38, v7
-; GFX9-NEXT: v_mov_b32_e32 v39, v6
-; GFX9-NEXT: v_mov_b32_e32 v48, v5
-; GFX9-NEXT: v_mov_b32_e32 v49, v4
-; GFX9-NEXT: v_mov_b32_e32 v50, v3
-; GFX9-NEXT: v_mov_b32_e32 v51, v2
-; GFX9-NEXT: v_mov_b32_e32 v52, v1
-; GFX9-NEXT: v_mov_b32_e32 v53, v0
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v14
; GFX9-NEXT: s_lshr_b32 s40, s29, 16
; GFX9-NEXT: s_lshr_b32 s41, s28, 16
@@ -42250,6 +42503,21 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: v_mov_b32_e32 v32, v13
+; GFX9-NEXT: v_mov_b32_e32 v33, v12
+; GFX9-NEXT: v_mov_b32_e32 v34, v11
+; GFX9-NEXT: v_mov_b32_e32 v35, v10
+; GFX9-NEXT: v_mov_b32_e32 v36, v9
+; GFX9-NEXT: v_mov_b32_e32 v37, v8
+; GFX9-NEXT: v_mov_b32_e32 v38, v7
+; GFX9-NEXT: v_mov_b32_e32 v39, v6
+; GFX9-NEXT: v_mov_b32_e32 v48, v5
+; GFX9-NEXT: v_mov_b32_e32 v49, v4
+; GFX9-NEXT: v_mov_b32_e32 v50, v3
+; GFX9-NEXT: v_mov_b32_e32 v51, v2
+; GFX9-NEXT: v_mov_b32_e32 v52, v1
+; GFX9-NEXT: v_mov_b32_e32 v53, v0
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -42268,7 +42536,6 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v35
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v37
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -42283,6 +42550,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v38
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v39
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v48
@@ -42412,7 +42680,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB55_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB55_2
+; GFX9-NEXT: s_branch .LBB55_3
;
; GFX11-TRUE16-LABEL: bitcast_v56f16_to_v14f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -42450,7 +42720,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -42462,15 +42732,14 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -42482,10 +42751,11 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -42505,10 +42775,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -42535,9 +42804,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -42552,7 +42821,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB55_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-TRUE16-NEXT: s_branch .LBB55_3
;
; GFX11-FAKE16-LABEL: bitcast_v56f16_to_v14f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -42580,7 +42851,7 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-FAKE16-NEXT: v_and_b32_e32 v50, 0xffff, v9
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s27, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
@@ -42592,15 +42863,14 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
+; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s1, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s44
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s43
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s42
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
@@ -42612,10 +42882,11 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s27, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
@@ -42635,10 +42906,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s15
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-FAKE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v49, 16, v67
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v48, 16, v66
@@ -42665,9 +42935,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -42682,7 +42952,9 @@ define inreg <14 x double> @bitcast_v56f16_to_v14f64_scalar(<56 x half> inreg %a
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB55_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB55_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-FAKE16-NEXT: s_branch .LBB55_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -44229,6 +44501,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:376 ; 4-byte Folded Spill
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v33, s16
@@ -44365,9 +44638,6 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v47, v34
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: ; kill: killed $vgpr33
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: ; implicit-def: $vgpr33
-; SI-NEXT: ; kill: killed $vgpr33
; SI-NEXT: ; implicit-def: $vgpr38
; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr46
@@ -44453,6 +44723,8 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; SI-NEXT: ; kill: killed $vgpr33
; SI-NEXT: ; implicit-def: $vgpr33
; SI-NEXT: ; kill: killed $vgpr33
+; SI-NEXT: ; implicit-def: $vgpr33
+; SI-NEXT: ; kill: killed $vgpr33
; SI-NEXT: .LBB57_3: ; %Flow
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: s_waitcnt expcnt(0)
@@ -45014,13 +45286,14 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v22, 16, v8
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; VI-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v5
@@ -45029,10 +45302,13 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v15, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v14, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_add_i32 s43, s43, 3
; VI-NEXT: s_add_i32 s17, s17, 3
@@ -45089,7 +45365,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
; VI-NEXT: v_add_u32_e32 v13, vcc, 3, v13
; VI-NEXT: v_add_u32_e32 v27, vcc, 3, v27
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s43, 16
; VI-NEXT: s_or_b32 s4, s4, s5
@@ -45175,8 +45451,6 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v12, s7
; VI-NEXT: v_mov_b32_e32 v13, s6
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v56i16_to_v56f16_scalar:
; GFX9: ; %bb.0:
@@ -45196,13 +45470,14 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v8
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
@@ -45219,10 +45494,13 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s29, s43
; GFX9-NEXT: v_pk_add_u16 v49, s4, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s28, s42
@@ -45322,8 +45600,6 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; GFX9-NEXT: s_branch .LBB57_5
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v49, s29
; GFX9-NEXT: v_mov_b32_e32 v48, s28
@@ -45460,7 +45736,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v27.h
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s25, 16
@@ -45476,14 +45752,17 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
@@ -45505,7 +45784,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s40
@@ -45520,7 +45799,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, v8, 3 op_sel_hi:[1,0]
@@ -45579,8 +45858,6 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
-; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
@@ -45591,7 +45868,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s17 :: v_dual_mov_b32 v36, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v30, s2
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s1 :: v_dual_mov_b32 v32, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s42 :: v_dual_mov_b32 v49, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s40 :: v_dual_mov_b32 v51, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s14 :: v_dual_mov_b32 v53, s13
@@ -45599,7 +45876,7 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s10 :: v_dual_mov_b32 v65, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s8 :: v_dual_mov_b32 v67, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s5 :: v_dual_mov_b32 v69, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s44
; GFX11-TRUE16-NEXT: .LBB57_5: ; %end
; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v37
@@ -45692,19 +45969,22 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
@@ -45737,10 +46017,10 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, v9, 3 op_sel_hi:[1,0]
@@ -45765,12 +46045,12 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_u16 v29, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v33, s11, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v34, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v35, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v35, s8, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v32, s0, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v31, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v30, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v36, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v36, s6, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v32
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v31
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v30
@@ -45800,8 +46080,6 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
; GFX11-FAKE16-NEXT: s_branch .LBB57_5
-; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
@@ -45818,8 +46096,8 @@ define inreg <56 x half> @bitcast_v56i16_to_v56f16_scalar(<56 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s15 :: v_dual_mov_b32 v53, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s13 :: v_dual_mov_b32 v55, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s11 :: v_dual_mov_b32 v65, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s9 :: v_dual_mov_b32 v67, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s6 :: v_dual_mov_b32 v69, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s8 :: v_dual_mov_b32 v67, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s9 :: v_dual_mov_b32 v69, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s5
; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
@@ -47162,10 +47440,14 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v33
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: s_cbranch_scc0 .LBB59_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB59_3
-; SI-NEXT: .LBB59_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB59_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v33, v56
; SI-NEXT: v_cvt_f32_f16_e32 v47, v47
; SI-NEXT: v_cvt_f32_f16_e32 v45, v45
@@ -47404,7 +47686,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v53, v6, v30, 16
; SI-NEXT: v_alignbit_b32 v30, v7, v60, 16
; SI-NEXT: v_alignbit_b32 v29, v4, v29, 16
-; SI-NEXT: .LBB59_3: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28
; SI-NEXT: v_lshlrev_b32_e32 v33, 16, v56
; SI-NEXT: v_and_b32_e32 v25, 0xffff, v25
@@ -47587,8 +47869,6 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:112 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0)
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB59_4:
-; SI-NEXT: s_branch .LBB59_2
;
; VI-LABEL: bitcast_v56f16_to_v56i16_scalar:
; VI: ; %bb.0:
@@ -47608,13 +47888,14 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v12
; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; VI-NEXT: v_lshrrev_b32_e32 v22, 16, v8
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; VI-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; VI-NEXT: v_lshrrev_b32_e32 v19, 16, v5
@@ -47631,10 +47912,13 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v50, 0x200
; VI-NEXT: v_add_f16_e32 v28, s16, v50
; VI-NEXT: v_add_f16_e32 v47, s43, v50
@@ -47693,8 +47977,6 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v13, 0x200, v13
; VI-NEXT: v_add_f16_e32 v27, 0x200, v27
; VI-NEXT: s_branch .LBB59_5
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v50, s6
; VI-NEXT: v_mov_b32_e32 v49, s29
@@ -47824,13 +48106,14 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v23, 16, v9
; GFX9-NEXT: v_lshrrev_b32_e32 v22, 16, v8
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v21, 16, v7
; GFX9-NEXT: v_lshrrev_b32_e32 v20, 16, v6
; GFX9-NEXT: v_lshrrev_b32_e32 v19, 16, v5
@@ -47847,10 +48130,13 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v45, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v46, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v47, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
; GFX9-NEXT: v_and_b32_e32 v12, 0xffff, v12
; GFX9-NEXT: v_and_b32_e32 v11, 0xffff, v11
@@ -47952,8 +48238,6 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v12
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; GFX9-NEXT: s_branch .LBB59_5
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v49, s29
; GFX9-NEXT: v_mov_b32_e32 v48, s28
@@ -48090,7 +48374,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.l, v0.h
; GFX11-TRUE16-NEXT: v_mov_b16_e32 v18.h, v27.h
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s29, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s28, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s26, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s25, 16
@@ -48106,14 +48390,17 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s0, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX11-TRUE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-TRUE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
@@ -48135,7 +48422,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v1, v19, 16, v1
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v0, v18, 16, v0
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s29, s29, s45
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s44
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s28, s28, s43
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s27, s27, s42
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s26, s26, s41
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s25, s25, s40
@@ -48150,7 +48437,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s5
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s43
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, v8 op_sel_hi:[0,1]
@@ -48209,8 +48496,6 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
-; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
@@ -48221,7 +48506,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v35, s17 :: v_dual_mov_b32 v36, s16
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v37, s3 :: v_dual_mov_b32 v30, s2
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v31, s1 :: v_dual_mov_b32 v32, s0
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s44
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v38, s45 :: v_dual_mov_b32 v39, s43
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v48, s42 :: v_dual_mov_b32 v49, s41
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v50, s40 :: v_dual_mov_b32 v51, s15
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v52, s14 :: v_dual_mov_b32 v53, s13
@@ -48229,7 +48514,7 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v64, s10 :: v_dual_mov_b32 v65, s9
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v66, s8 :: v_dual_mov_b32 v67, s6
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v68, s5 :: v_dual_mov_b32 v69, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s43
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s44
; GFX11-TRUE16-NEXT: .LBB59_5: ; %end
; GFX11-TRUE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
; GFX11-TRUE16-NEXT: v_and_b32_e32 v37, 0xffff, v37
@@ -48322,19 +48607,22 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
; GFX11-FAKE16-NEXT: v_and_b32_e32 v8, 0xffff, v8
; GFX11-FAKE16-NEXT: v_and_b32_e32 v7, 0xffff, v7
@@ -48367,10 +48655,10 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, v9 op_sel_hi:[0,1]
@@ -48395,12 +48683,12 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v29, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v33, 0x200, s11 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v34, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v35, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v35, 0x200, s8 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v32, 0x200, s0 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v31, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v30, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v36, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v36, 0x200, s6 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v71, 16, v32
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v70, 16, v31
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v69, 16, v30
@@ -48430,8 +48718,6 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v26, 16, v8
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v27, 16, v9
; GFX11-FAKE16-NEXT: s_branch .LBB59_5
-; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v13, s29 :: v_dual_mov_b32 v14, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s26
@@ -48448,8 +48734,8 @@ define inreg <56 x i16> @bitcast_v56f16_to_v56i16_scalar(<56 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v52, s15 :: v_dual_mov_b32 v53, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v54, s13 :: v_dual_mov_b32 v55, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s11 :: v_dual_mov_b32 v65, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s9 :: v_dual_mov_b32 v67, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s6 :: v_dual_mov_b32 v69, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s8 :: v_dual_mov_b32 v67, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s9 :: v_dual_mov_b32 v69, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s4 :: v_dual_mov_b32 v71, s5
; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v31, 0xffff, v31
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
index 4f46875..6e60051 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.960bit.ll
@@ -201,6 +201,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -218,7 +219,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -232,10 +233,13 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27
@@ -266,16 +270,15 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v30i32_to_v30f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -293,7 +296,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -307,10 +310,13 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v29, vcc, 3, v29
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
; VI-NEXT: v_add_u32_e32 v27, vcc, 3, v27
@@ -341,16 +347,15 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v30i32_to_v30f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -368,7 +373,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -382,10 +387,13 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v29, 3, v29
; GFX9-NEXT: v_add_u32_e32 v28, 3, v28
; GFX9-NEXT: v_add_u32_e32 v27, 3, v27
@@ -416,43 +424,41 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v30i32_to_v30f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB1_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_3:
-; GFX11-NEXT: .LBB1_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
; GFX11-NEXT: v_add_nc_u32_e32 v28, 3, v28
; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
@@ -483,6 +489,7 @@ define inreg <30 x float> @bitcast_v30i32_to_v30f32_scalar(<30 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -681,6 +688,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -698,7 +706,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -712,10 +720,13 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB3_4
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_3
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
; SI-NEXT: v_add_f32_e32 v28, 1.0, v28
; SI-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -746,16 +757,15 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB3_3: ; %end
+; SI-NEXT: .LBB3_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_4:
-; SI-NEXT: s_branch .LBB3_2
;
; VI-LABEL: bitcast_v30f32_to_v30i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -773,7 +783,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -787,10 +797,13 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB3_4
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_3
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
; VI-NEXT: v_add_f32_e32 v28, 1.0, v28
; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -821,16 +834,15 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB3_3: ; %end
+; VI-NEXT: .LBB3_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_4:
-; VI-NEXT: s_branch .LBB3_2
;
; GFX9-LABEL: bitcast_v30f32_to_v30i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -848,7 +860,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -862,10 +874,13 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_3
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
; GFX9-NEXT: v_add_f32_e32 v28, 1.0, v28
; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -896,43 +911,41 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB3_3: ; %end
+; GFX9-NEXT: .LBB3_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_4:
-; GFX9-NEXT: s_branch .LBB3_2
;
; GFX11-LABEL: bitcast_v30f32_to_v30i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB3_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: .LBB3_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
@@ -948,6 +961,7 @@ define inreg <30 x i32> @bitcast_v30f32_to_v30i32_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB3_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1161,6 +1175,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -1178,7 +1193,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -1192,10 +1207,13 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB5_4
+; SI-NEXT: s_cbranch_scc0 .LBB5_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB5_3
-; SI-NEXT: .LBB5_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB5_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB5_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27
@@ -1226,16 +1244,15 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB5_3: ; %end
+; SI-NEXT: .LBB5_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB5_4:
-; SI-NEXT: s_branch .LBB5_2
;
; VI-LABEL: bitcast_v30i32_to_v15i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -1253,7 +1270,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -1267,10 +1284,13 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB5_4
+; VI-NEXT: s_cbranch_scc0 .LBB5_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB5_3
-; VI-NEXT: .LBB5_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB5_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB5_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v29, vcc, 3, v29
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
; VI-NEXT: v_add_u32_e32 v27, vcc, 3, v27
@@ -1301,16 +1321,15 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB5_3: ; %end
+; VI-NEXT: .LBB5_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB5_4:
-; VI-NEXT: s_branch .LBB5_2
;
; GFX9-LABEL: bitcast_v30i32_to_v15i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -1328,7 +1347,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -1342,10 +1361,13 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB5_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB5_3
-; GFX9-NEXT: .LBB5_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB5_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v29, 3, v29
; GFX9-NEXT: v_add_u32_e32 v28, 3, v28
; GFX9-NEXT: v_add_u32_e32 v27, 3, v27
@@ -1376,43 +1398,41 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB5_3: ; %end
+; GFX9-NEXT: .LBB5_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB5_4:
-; GFX9-NEXT: s_branch .LBB5_2
;
; GFX11-LABEL: bitcast_v30i32_to_v15i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB5_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB5_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB5_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB5_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB5_3:
-; GFX11-NEXT: .LBB5_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB5_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
; GFX11-NEXT: v_add_nc_u32_e32 v28, 3, v28
; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
@@ -1443,6 +1463,7 @@ define inreg <15 x i64> @bitcast_v30i32_to_v15i64_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB5_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1664,6 +1685,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -1681,7 +1703,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -1695,10 +1717,13 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB7_4
+; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB7_3
-; SI-NEXT: .LBB7_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB7_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
; SI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
@@ -1729,16 +1754,15 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB7_3: ; %end
+; SI-NEXT: .LBB7_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB7_4:
-; SI-NEXT: s_branch .LBB7_2
;
; VI-LABEL: bitcast_v15i64_to_v30i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -1756,7 +1780,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -1770,10 +1794,13 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB7_4
+; VI-NEXT: s_cbranch_scc0 .LBB7_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB7_3
-; VI-NEXT: .LBB7_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB7_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB7_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
; VI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
@@ -1804,16 +1831,15 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB7_3: ; %end
+; VI-NEXT: .LBB7_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB7_4:
-; VI-NEXT: s_branch .LBB7_2
;
; GFX9-LABEL: bitcast_v15i64_to_v30i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -1831,7 +1857,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -1845,10 +1871,13 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB7_3
-; GFX9-NEXT: .LBB7_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB7_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, 3, v28
; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, 0, v29, vcc
; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, 3, v26
@@ -1879,43 +1908,41 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB7_3: ; %end
+; GFX9-NEXT: .LBB7_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB7_4:
-; GFX9-NEXT: s_branch .LBB7_2
;
; GFX11-LABEL: bitcast_v15i64_to_v30i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB7_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB7_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB7_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB7_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB7_3:
-; GFX11-NEXT: .LBB7_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB7_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
@@ -1954,6 +1981,7 @@ define inreg <30 x i32> @bitcast_v15i64_to_v30i32_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB7_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2167,6 +2195,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -2184,7 +2213,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -2198,10 +2227,13 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB9_4
+; SI-NEXT: s_cbranch_scc0 .LBB9_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB9_3
-; SI-NEXT: .LBB9_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB9_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB9_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v29, vcc, 3, v29
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
; SI-NEXT: v_add_i32_e32 v27, vcc, 3, v27
@@ -2232,16 +2264,15 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
; SI-NEXT: v_add_i32_e32 v1, vcc, 3, v1
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
-; SI-NEXT: .LBB9_3: ; %end
+; SI-NEXT: .LBB9_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB9_4:
-; SI-NEXT: s_branch .LBB9_2
;
; VI-LABEL: bitcast_v30i32_to_v15f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -2259,7 +2290,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -2273,10 +2304,13 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v29, vcc, 3, v29
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
; VI-NEXT: v_add_u32_e32 v27, vcc, 3, v27
@@ -2307,16 +2341,15 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
; VI-NEXT: v_add_u32_e32 v1, vcc, 3, v1
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v30i32_to_v15f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -2334,7 +2367,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -2348,10 +2381,13 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_u32_e32 v29, 3, v29
; GFX9-NEXT: v_add_u32_e32 v28, 3, v28
; GFX9-NEXT: v_add_u32_e32 v27, 3, v27
@@ -2382,43 +2418,41 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX9-NEXT: v_add_u32_e32 v2, 3, v2
; GFX9-NEXT: v_add_u32_e32 v1, 3, v1
; GFX9-NEXT: v_add_u32_e32 v0, 3, v0
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v30i32_to_v15f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB9_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_3:
-; GFX11-NEXT: .LBB9_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_nc_u32_e32 v29, 3, v29
; GFX11-NEXT: v_add_nc_u32_e32 v28, 3, v28
; GFX11-NEXT: v_add_nc_u32_e32 v27, 3, v27
@@ -2449,6 +2483,7 @@ define inreg <15 x double> @bitcast_v30i32_to_v15f64_scalar(<30 x i32> inreg %a,
; GFX11-NEXT: v_add_nc_u32_e32 v2, 3, v2
; GFX11-NEXT: v_add_nc_u32_e32 v1, 3, v1
; GFX11-NEXT: v_add_nc_u32_e32 v0, 3, v0
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2602,6 +2637,7 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -2630,13 +2666,16 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB11_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB11_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB11_3
-; SI-NEXT: .LBB11_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB11_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB11_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -2652,17 +2691,16 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB11_3: ; %end
+; SI-NEXT: .LBB11_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: v_mov_b32_e32 v17, v31
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB11_4:
-; SI-NEXT: s_branch .LBB11_2
;
; VI-LABEL: bitcast_v15f64_to_v30i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -2691,13 +2729,16 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB11_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_3
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -2713,17 +2754,16 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB11_3: ; %end
+; VI-NEXT: .LBB11_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: v_mov_b32_e32 v17, v31
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_4:
-; VI-NEXT: s_branch .LBB11_2
;
; GFX9-LABEL: bitcast_v15f64_to_v30i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -2752,13 +2792,16 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_3
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -2774,44 +2817,42 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB11_3: ; %end
+; GFX9-NEXT: .LBB11_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: v_mov_b32_e32 v17, v31
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_4:
-; GFX9-NEXT: s_branch .LBB11_2
;
; GFX11-LABEL: bitcast_v15f64_to_v30i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB11_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: .LBB11_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -2827,6 +2868,7 @@ define inreg <30 x i32> @bitcast_v15f64_to_v30i32_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB11_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
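;
; Editorial sketch (not a generated check line; labels are schematic): the
; hunks around this function all apply the same control-flow rewrite. The
; old code reached %cmp.true through a trampoline block, "s_cbranch_scc0
; .LBBn_4" where .LBBn_4 held only "s_branch .LBBn_2". The new code
; threads a flag through a %Flow block instead (wave64 form):
;   s_mov_b64 s[4:5], -1           ; assume %cmp.true still has to run
;   s_cbranch_scc0 .LBBn_2         ; all lanes have %b == 0: skip %cmp.false
; ; %bb.1:                         ; %cmp.false
;   s_mov_b64 s[4:5], 0            ; %cmp.false ran: clear the flag
; .LBBn_2:                         ; %Flow
;   s_andn2_b64 vcc, exec, s[4:5]  ; vcc = exec & ~flag
;   s_cbranch_vccnz .LBBn_4        ; flag == 0: jump straight to %end
; ; %bb.3:                         ; %cmp.true
; .LBBn_4:                         ; %end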
@@ -3848,6 +3890,7 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s45, v1
; SI-NEXT: v_readfirstlane_b32 s44, v2
; SI-NEXT: v_readfirstlane_b32 s43, v3
@@ -3863,8 +3906,8 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v13
; SI-NEXT: v_readfirstlane_b32 s8, v14
; SI-NEXT: v_readfirstlane_b32 s7, v15
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -4202,7 +4245,9 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr47
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v30i32_to_v60i16_scalar:
; VI: ; %bb.0:
@@ -4215,8 +4260,9 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_writelane_b32 v30, s34, 2
; VI-NEXT: v_writelane_b32 v30, s35, 3
; VI-NEXT: v_writelane_b32 v30, s36, 4
-; VI-NEXT: v_writelane_b32 v30, s37, 5
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: v_writelane_b32 v30, s37, 5
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v30, s38, 6
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_readfirstlane_b32 s44, v1
@@ -4232,14 +4278,14 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v11
; VI-NEXT: v_readfirstlane_b32 s9, v12
; VI-NEXT: v_readfirstlane_b32 s8, v13
-; VI-NEXT: v_readfirstlane_b32 s6, v14
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v15
+; VI-NEXT: v_readfirstlane_b32 s7, v14
+; VI-NEXT: v_readfirstlane_b32 s6, v15
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v30, s39, 7
; VI-NEXT: s_cbranch_scc0 .LBB13_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -4270,8 +4316,8 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: s_lshr_b32 s39, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB13_3
; VI-NEXT: .LBB13_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -4300,8 +4346,8 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -4415,12 +4461,12 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s44, s56, 16
; VI-NEXT: s_or_b32 s8, s8, s44
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s44, s47, 16
-; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_lshl_b32 s44, s47, 16
; VI-NEXT: s_or_b32 s7, s7, s44
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -4449,8 +4495,8 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v25, s10
; VI-NEXT: v_mov_b32_e32 v26, s9
; VI-NEXT: v_mov_b32_e32 v27, s8
-; VI-NEXT: v_mov_b32_e32 v28, s6
-; VI-NEXT: v_mov_b32_e32 v29, s7
+; VI-NEXT: v_mov_b32_e32 v28, s7
+; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: v_readlane_b32 s39, v30, 7
; VI-NEXT: v_readlane_b32 s38, v30, 6
; VI-NEXT: v_readlane_b32 s37, v30, 5
@@ -4495,7 +4541,9 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr56
; VI-NEXT: ; implicit-def: $sgpr47
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB13_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB13_2
+; VI-NEXT: s_branch .LBB13_3
;
; GFX9-LABEL: bitcast_v30i32_to_v60i16_scalar:
; GFX9: ; %bb.0:
@@ -4504,45 +4552,46 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: v_writelane_b32 v30, s30, 0
-; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: v_writelane_b32 v30, s34, 2
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
-; GFX9-NEXT: v_readfirstlane_b32 s44, v14
+; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s45, v15
+; GFX9-NEXT: v_writelane_b32 v30, s34, 2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s44, v13
+; GFX9-NEXT: v_readfirstlane_b32 s45, v14
+; GFX9-NEXT: v_readfirstlane_b32 s6, v15
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -4559,6 +4608,7 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB13_3
; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s45, s45, 3
; GFX9-NEXT: s_add_i32 s44, s44, 3
; GFX9-NEXT: s_add_i32 s43, s43, 3
@@ -4574,7 +4624,6 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -4589,22 +4638,22 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -4634,22 +4683,22 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s88
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s79
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s78
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s77
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s76
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s75
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s77
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s76
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s75
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s74
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s46
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -4664,22 +4713,22 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
-; GFX9-NEXT: v_mov_b32_e32 v28, s42
-; GFX9-NEXT: v_mov_b32_e32 v29, s43
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s42
+; GFX9-NEXT: v_mov_b32_e32 v28, s43
+; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: v_readlane_b32 s35, v30, 3
; GFX9-NEXT: v_readlane_b32 s34, v30, 2
; GFX9-NEXT: v_readlane_b32 s31, v30, 1
@@ -4720,7 +4769,9 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB13_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB13_2
+; GFX9-NEXT: s_branch .LBB13_3
;
; GFX11-LABEL: bitcast_v30i32_to_v60i16_scalar:
; GFX11: ; %bb.0:
@@ -4735,16 +4786,16 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
; GFX11-NEXT: v_readfirstlane_b32 s11, v7
; GFX11-NEXT: v_readfirstlane_b32 s12, v8
-; GFX11-NEXT: v_readfirstlane_b32 s13, v9
+; GFX11-NEXT: v_readfirstlane_b32 s14, v9
; GFX11-NEXT: v_readfirstlane_b32 s15, v10
-; GFX11-NEXT: v_readfirstlane_b32 s14, v11
-; GFX11-NEXT: s_mov_b32 s94, 0
+; GFX11-NEXT: v_readfirstlane_b32 s13, v11
+; GFX11-NEXT: s_mov_b32 s94, -1
; GFX11-NEXT: s_and_b32 s40, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -4772,12 +4823,11 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s91, s2, 16
; GFX11-NEXT: s_lshr_b32 s92, s1, 16
; GFX11-NEXT: s_lshr_b32 s93, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
+; GFX11-NEXT: s_cbranch_execnz .LBB13_3
; GFX11-NEXT: .LBB13_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s14, s14, 3
-; GFX11-NEXT: s_add_i32 s15, s15, 3
; GFX11-NEXT: s_add_i32 s13, s13, 3
+; GFX11-NEXT: s_add_i32 s15, s15, 3
+; GFX11-NEXT: s_add_i32 s14, s14, 3
; GFX11-NEXT: s_add_i32 s12, s12, 3
; GFX11-NEXT: s_add_i32 s11, s11, 3
; GFX11-NEXT: s_add_i32 s10, s10, 3
@@ -4805,9 +4855,9 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -4864,9 +4914,9 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s45
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s44
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s42
+; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s15, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s40
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -4880,8 +4930,8 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s13
-; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
+; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s14
+; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s13
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB13_4:
; GFX11-NEXT: ; implicit-def: $sgpr93
@@ -4914,7 +4964,9 @@ define inreg <60 x i16> @bitcast_v30i32_to_v60i16_scalar(<30 x i32> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB13_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
+; GFX11-NEXT: s_cbranch_vccz .LBB13_2
+; GFX11-NEXT: s_branch .LBB13_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
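;
; Editorial sketch: where the scc0 branch instead targets the .LBBn_4
; implicit-def epilogue, that epilogue now ends with the same flag test
; rather than an unconditional "s_branch .LBBn_2":
;   s_andn2_b64 vcc, exec, s[4:5]  ; the flag is still -1 on this path
;   s_cbranch_vccz .LBBn_2         ; vcc == 0: go materialize %cmp.true
;   s_branch .LBBn_3               ; otherwise continue at the store/%end block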
@@ -6332,6 +6384,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v35, v22
; SI-NEXT: v_mov_b32_e32 v36, v20
; SI-NEXT: v_mov_b32_e32 v37, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5
@@ -6363,7 +6416,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10
@@ -6666,7 +6719,9 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v30, v32
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v60i16_to_v30i32_scalar:
; VI: ; %bb.0:
@@ -6686,6 +6741,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -6702,7 +6758,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB15_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -6949,11 +7005,28 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB15_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB15_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB15_2
+; VI-NEXT: s_branch .LBB15_3
;
; GFX9-LABEL: bitcast_v60i16_to_v30i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -6970,21 +7043,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -7005,7 +7064,6 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -7020,6 +7078,7 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -7165,7 +7224,9 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB15_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB15_2
+; GFX9-NEXT: s_branch .LBB15_3
;
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -7210,41 +7271,41 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -7259,17 +7320,16 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-TRUE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -7283,24 +7343,24 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -7317,7 +7377,9 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB15_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB15_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-TRUE16-NEXT: s_branch .LBB15_3
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -7350,41 +7412,41 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB15_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -7399,17 +7461,16 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB15_3
; GFX11-FAKE16-NEXT: .LBB15_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -7423,24 +7484,24 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -7457,7 +7518,9 @@ define inreg <30 x i32> @bitcast_v60i16_to_v30i32_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB15_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB15_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB15_2
+; GFX11-FAKE16-NEXT: s_branch .LBB15_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
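;
; Editorial sketch: the GFX11 (wave32) checks carry the identical rewrite
; with 32-bit mask registers (here s18 or s94 as the flag):
;   s_mov_b32 s94, -1
;   ...
;   s_and_not1_b32 vcc_lo, exec_lo, s94
;   s_cbranch_vccz .LBBn_2
;   s_branch .LBBn_3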
@@ -8835,6 +8898,7 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s45, v1
; SI-NEXT: v_readfirstlane_b32 s44, v2
; SI-NEXT: v_readfirstlane_b32 s43, v3
@@ -8847,11 +8911,11 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s12, v10
; SI-NEXT: v_readfirstlane_b32 s11, v11
; SI-NEXT: v_readfirstlane_b32 s10, v12
-; SI-NEXT: v_readfirstlane_b32 s8, v13
-; SI-NEXT: v_readfirstlane_b32 s7, v14
-; SI-NEXT: v_readfirstlane_b32 s6, v15
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v16
+; SI-NEXT: v_readfirstlane_b32 s9, v13
+; SI-NEXT: v_readfirstlane_b32 s8, v14
+; SI-NEXT: v_readfirstlane_b32 s7, v15
+; SI-NEXT: v_readfirstlane_b32 s6, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
@@ -8867,13 +8931,13 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -8931,10 +8995,10 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v60, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v14, s11
; SI-NEXT: v_cvt_f32_f16_e32 v16, s12
@@ -8989,10 +9053,10 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: s_add_i32 s12, s12, 3
; SI-NEXT: s_add_i32 s11, s11, 3
; SI-NEXT: s_add_i32 s10, s10, 3
+; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_add_i32 s8, s8, 3
; SI-NEXT: s_add_i32 s7, s7, 3
; SI-NEXT: s_add_i32 s6, s6, 3
-; SI-NEXT: s_add_i32 s9, s9, 3
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_lshr_b32 s5, s17, 16
; SI-NEXT: s_lshr_b32 s46, s18, 16
@@ -9019,14 +9083,14 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: s_lshr_b32 s91, s12, 16
; SI-NEXT: s_lshr_b32 s92, s11, 16
; SI-NEXT: s_lshr_b32 s93, s10, 16
-; SI-NEXT: s_lshr_b32 s94, s8, 16
-; SI-NEXT: s_lshr_b32 s95, s7, 16
-; SI-NEXT: s_lshr_b32 vcc_lo, s6, 16
-; SI-NEXT: s_lshr_b32 vcc_hi, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
+; SI-NEXT: s_lshr_b32 s94, s9, 16
+; SI-NEXT: s_lshr_b32 s95, s8, 16
+; SI-NEXT: s_lshr_b32 vcc_lo, s7, 16
+; SI-NEXT: s_lshr_b32 vcc_hi, s6, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v6, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s9
; SI-NEXT: v_cvt_f32_f16_e32 v12, s10
; SI-NEXT: v_cvt_f32_f16_e32 v14, s11
; SI-NEXT: v_cvt_f32_f16_e32 v16, s12
@@ -9373,7 +9437,9 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v30i32_to_v60f16_scalar:
; VI: ; %bb.0:
@@ -9386,8 +9452,9 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: v_writelane_b32 v30, s34, 2
; VI-NEXT: v_writelane_b32 v30, s35, 3
; VI-NEXT: v_writelane_b32 v30, s36, 4
-; VI-NEXT: v_writelane_b32 v30, s37, 5
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: v_writelane_b32 v30, s37, 5
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v30, s38, 6
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_readfirstlane_b32 s44, v1
@@ -9403,14 +9470,14 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v11
; VI-NEXT: v_readfirstlane_b32 s9, v12
; VI-NEXT: v_readfirstlane_b32 s8, v13
-; VI-NEXT: v_readfirstlane_b32 s6, v14
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v15
+; VI-NEXT: v_readfirstlane_b32 s7, v14
+; VI-NEXT: v_readfirstlane_b32 s6, v15
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v30, s39, 7
; VI-NEXT: s_cbranch_scc0 .LBB17_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -9441,8 +9508,8 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: s_lshr_b32 s39, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB17_3
; VI-NEXT: .LBB17_2: ; %cmp.true
-; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s6, s6, 3
+; VI-NEXT: s_add_i32 s7, s7, 3
; VI-NEXT: s_add_i32 s8, s8, 3
; VI-NEXT: s_add_i32 s9, s9, 3
; VI-NEXT: s_add_i32 s10, s10, 3
@@ -9471,8 +9538,8 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -9586,12 +9653,12 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s44, s56, 16
; VI-NEXT: s_or_b32 s8, s8, s44
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s44, s47, 16
-; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_lshl_b32 s44, s47, 16
; VI-NEXT: s_or_b32 s7, s7, s44
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -9620,8 +9687,8 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v25, s10
; VI-NEXT: v_mov_b32_e32 v26, s9
; VI-NEXT: v_mov_b32_e32 v27, s8
-; VI-NEXT: v_mov_b32_e32 v28, s6
-; VI-NEXT: v_mov_b32_e32 v29, s7
+; VI-NEXT: v_mov_b32_e32 v28, s7
+; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: v_readlane_b32 s39, v30, 7
; VI-NEXT: v_readlane_b32 s38, v30, 6
; VI-NEXT: v_readlane_b32 s37, v30, 5
@@ -9666,7 +9733,9 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr56
; VI-NEXT: ; implicit-def: $sgpr47
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB17_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB17_2
+; VI-NEXT: s_branch .LBB17_3
;
; GFX9-LABEL: bitcast_v30i32_to_v60f16_scalar:
; GFX9: ; %bb.0:
@@ -9675,45 +9744,46 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: v_writelane_b32 v30, s30, 0
-; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: v_writelane_b32 v30, s34, 2
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
-; GFX9-NEXT: v_readfirstlane_b32 s44, v14
+; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s45, v15
+; GFX9-NEXT: v_writelane_b32 v30, s34, 2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s44, v13
+; GFX9-NEXT: v_readfirstlane_b32 s45, v14
+; GFX9-NEXT: v_readfirstlane_b32 s6, v15
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -9730,6 +9800,7 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB17_3
; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s45, s45, 3
; GFX9-NEXT: s_add_i32 s44, s44, 3
; GFX9-NEXT: s_add_i32 s43, s43, 3
@@ -9745,7 +9816,6 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s9, s9, 3
; GFX9-NEXT: s_add_i32 s8, s8, 3
; GFX9-NEXT: s_add_i32 s7, s7, 3
-; GFX9-NEXT: s_add_i32 s6, s6, 3
; GFX9-NEXT: s_add_i32 s29, s29, 3
; GFX9-NEXT: s_add_i32 s28, s28, 3
; GFX9-NEXT: s_add_i32 s27, s27, 3
@@ -9760,22 +9830,22 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -9805,22 +9875,22 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s88
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s79
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s78
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s77
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s76
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s75
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s77
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s76
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s75
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s74
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s46
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -9835,22 +9905,22 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
-; GFX9-NEXT: v_mov_b32_e32 v28, s42
-; GFX9-NEXT: v_mov_b32_e32 v29, s43
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s42
+; GFX9-NEXT: v_mov_b32_e32 v28, s43
+; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: v_readlane_b32 s35, v30, 3
; GFX9-NEXT: v_readlane_b32 s34, v30, 2
; GFX9-NEXT: v_readlane_b32 s31, v30, 1
@@ -9891,7 +9961,9 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB17_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB17_2
+; GFX9-NEXT: s_branch .LBB17_3
;
; GFX11-LABEL: bitcast_v30i32_to_v60f16_scalar:
; GFX11: ; %bb.0:
@@ -9906,16 +9978,16 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
; GFX11-NEXT: v_readfirstlane_b32 s11, v7
; GFX11-NEXT: v_readfirstlane_b32 s12, v8
-; GFX11-NEXT: v_readfirstlane_b32 s13, v9
+; GFX11-NEXT: v_readfirstlane_b32 s14, v9
; GFX11-NEXT: v_readfirstlane_b32 s15, v10
-; GFX11-NEXT: v_readfirstlane_b32 s14, v11
-; GFX11-NEXT: s_mov_b32 s94, 0
+; GFX11-NEXT: v_readfirstlane_b32 s13, v11
+; GFX11-NEXT: s_mov_b32 s94, -1
; GFX11-NEXT: s_and_b32 s40, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -9943,12 +10015,11 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s91, s2, 16
; GFX11-NEXT: s_lshr_b32 s92, s1, 16
; GFX11-NEXT: s_lshr_b32 s93, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
+; GFX11-NEXT: s_cbranch_execnz .LBB17_3
; GFX11-NEXT: .LBB17_2: ; %cmp.true
-; GFX11-NEXT: s_add_i32 s14, s14, 3
-; GFX11-NEXT: s_add_i32 s15, s15, 3
; GFX11-NEXT: s_add_i32 s13, s13, 3
+; GFX11-NEXT: s_add_i32 s15, s15, 3
+; GFX11-NEXT: s_add_i32 s14, s14, 3
; GFX11-NEXT: s_add_i32 s12, s12, 3
; GFX11-NEXT: s_add_i32 s11, s11, 3
; GFX11-NEXT: s_add_i32 s10, s10, 3
@@ -9976,9 +10047,9 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -10035,9 +10106,9 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s45
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s44
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s42
+; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s15, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s40
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -10051,8 +10122,8 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s13
-; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
+; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s14
+; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s13
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB17_4:
; GFX11-NEXT: ; implicit-def: $sgpr93
@@ -10085,7 +10156,9 @@ define inreg <60 x half> @bitcast_v30i32_to_v60f16_scalar(<30 x i32> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB17_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
+; GFX11-NEXT: s_cbranch_vccz .LBB17_2
+; GFX11-NEXT: s_branch .LBB17_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -11696,11 +11769,11 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
@@ -11714,83 +11787,92 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:56
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:52
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v39, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v9
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v12
; SI-NEXT: v_cvt_f16_f32_e32 v41, v15
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v55, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
; SI-NEXT: v_cvt_f16_f32_e32 v61, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
; SI-NEXT: v_cvt_f16_f32_e32 v21, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v26
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v12, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
-; SI-NEXT: v_cvt_f16_f32_e32 v12, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s18
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v9, s20
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v8, s24
; SI-NEXT: v_cvt_f16_f32_e32 v5, s27
-; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v7, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
-; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v50, s28
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v42
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v45
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -11799,260 +11881,240 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
-; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v59
; SI-NEXT: v_cvt_f16_f32_e32 v59, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB19_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v22, v18
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
-; SI-NEXT: v_mov_b32_e32 v33, v32
-; SI-NEXT: v_or_b32_e32 v10, v32, v10
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v13, v43, v13
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v49
-; SI-NEXT: v_or_b32_e32 v7, v37, v7
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v6, v50, v6
+; SI-NEXT: v_mov_b32_e32 v30, v50
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v36
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_mov_b32_e32 v52, v12
+; SI-NEXT: v_or_b32_e32 v0, v12, v0
+; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v2, v9, v2
+; SI-NEXT: v_or_b32_e32 v3, v10, v3
+; SI-NEXT: v_or_b32_e32 v4, v8, v4
+; SI-NEXT: v_or_b32_e32 v5, v7, v5
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v32
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v18, v22, v18
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v50
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v48
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
-; SI-NEXT: v_mov_b32_e32 v56, v34
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_or_b32_e32 v8, v49, v8
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_or_b32_e32 v9, v36, v9
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
+; SI-NEXT: v_or_b32_e32 v10, v37, v10
+; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v45, v35
; SI-NEXT: v_or_b32_e32 v11, v35, v11
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
-; SI-NEXT: v_or_b32_e32 v12, v62, v12
+; SI-NEXT: v_or_b32_e32 v12, v63, v12
+; SI-NEXT: v_mov_b32_e32 v43, v62
+; SI-NEXT: v_or_b32_e32 v13, v62, v13
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
; SI-NEXT: v_or_b32_e32 v14, v55, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v21, v51, v21
-; SI-NEXT: v_or_b32_e32 v22, v30, v22
-; SI-NEXT: v_or_b32_e32 v23, v31, v23
+; SI-NEXT: v_or_b32_e32 v16, v53, v16
+; SI-NEXT: v_or_b32_e32 v17, v51, v17
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_or_b32_e32 v20, v21, v20
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: v_or_b32_e32 v21, v22, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v23, v22
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; SI-NEXT: v_or_b32_e32 v23, v24, v23
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_or_b32_e32 v17, v32, v17
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v16, v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v35, v39
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v37
-; SI-NEXT: v_or_b32_e32 v9, v39, v9
-; SI-NEXT: v_mov_b32_e32 v36, v37
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v8, v38, v8
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v50
+; SI-NEXT: v_or_b32_e32 v7, v31, v7
+; SI-NEXT: v_mov_b32_e32 v35, v50
+; SI-NEXT: v_mov_b32_e32 v50, v30
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v54, v29
-; SI-NEXT: v_mov_b32_e32 v54, v32
; SI-NEXT: s_branch .LBB19_3
; SI-NEXT: .LBB19_2:
-; SI-NEXT: v_mov_b32_e32 v54, v53
-; SI-NEXT: v_mov_b32_e32 v53, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v30
-; SI-NEXT: v_mov_b32_e32 v49, v48
-; SI-NEXT: v_mov_b32_e32 v48, v31
+; SI-NEXT: v_mov_b32_e32 v52, v12
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v30, v50
-; SI-NEXT: v_mov_b32_e32 v50, v51
-; SI-NEXT: v_mov_b32_e32 v51, v52
-; SI-NEXT: v_mov_b32_e32 v52, v53
-; SI-NEXT: v_mov_b32_e32 v53, v54
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_mov_b32_e32 v56, v34
+; SI-NEXT: v_mov_b32_e32 v45, v35
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
+; SI-NEXT: v_mov_b32_e32 v43, v62
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: v_mov_b32_e32 v31, v48
-; SI-NEXT: v_mov_b32_e32 v48, v49
; SI-NEXT: .LBB19_3: ; %Flow
; SI-NEXT: v_mov_b32_e32 v32, v33
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: v_mov_b32_e32 v61, v40
-; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB19_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v50
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v31
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v45
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
; SI-NEXT: v_mov_b32_e32 v55, v42
; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v43
+; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v48
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v31
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v51
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
@@ -12060,42 +12122,48 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
+; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
+; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -12108,65 +12176,65 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v35
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v57
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v47
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v44
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -12178,14 +12246,14 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
@@ -12193,9 +12261,9 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v17, v19, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
@@ -12205,7 +12273,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
@@ -12217,32 +12285,39 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_or_b32_e32 v19, v20, v19
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v52
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v50
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v30
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_or_b32_e32 v23, v25, v23
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
@@ -12327,6 +12402,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -12343,7 +12419,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB19_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -12547,11 +12623,28 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB19_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB19_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB19_2
+; VI-NEXT: s_branch .LBB19_3
;
; GFX9-LABEL: bitcast_v60f16_to_v30i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -12568,21 +12661,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -12603,7 +12682,6 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -12618,6 +12696,7 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -12765,7 +12844,9 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB19_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB19_2
+; GFX9-NEXT: s_branch .LBB19_3
;
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30i32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -12810,41 +12891,41 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -12859,17 +12940,16 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-TRUE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -12883,24 +12963,24 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -12917,7 +12997,9 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB19_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB19_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-TRUE16-NEXT: s_branch .LBB19_3
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30i32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -12950,41 +13032,41 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB19_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -12999,17 +13081,16 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB19_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB19_3
; GFX11-FAKE16-NEXT: .LBB19_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -13023,24 +13104,24 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -13057,7 +13138,9 @@ define inreg <30 x i32> @bitcast_v60f16_to_v30i32_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB19_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB19_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB19_2
+; GFX11-FAKE16-NEXT: s_branch .LBB19_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13255,6 +13338,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -13272,7 +13356,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -13286,10 +13370,13 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB21_4
+; SI-NEXT: s_cbranch_scc0 .LBB21_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB21_3
-; SI-NEXT: .LBB21_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB21_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB21_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
; SI-NEXT: v_add_f32_e32 v28, 1.0, v28
; SI-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -13320,16 +13407,15 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB21_3: ; %end
+; SI-NEXT: .LBB21_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB21_4:
-; SI-NEXT: s_branch .LBB21_2
;
; VI-LABEL: bitcast_v30f32_to_v15i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -13347,7 +13433,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -13361,10 +13447,13 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB21_4
+; VI-NEXT: s_cbranch_scc0 .LBB21_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB21_3
-; VI-NEXT: .LBB21_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB21_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB21_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
; VI-NEXT: v_add_f32_e32 v28, 1.0, v28
; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -13395,16 +13484,15 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB21_3: ; %end
+; VI-NEXT: .LBB21_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB21_4:
-; VI-NEXT: s_branch .LBB21_2
;
; GFX9-LABEL: bitcast_v30f32_to_v15i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -13422,7 +13510,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -13436,10 +13524,13 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB21_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB21_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB21_3
-; GFX9-NEXT: .LBB21_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB21_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
; GFX9-NEXT: v_add_f32_e32 v28, 1.0, v28
; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -13470,43 +13561,41 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB21_3: ; %end
+; GFX9-NEXT: .LBB21_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB21_4:
-; GFX9-NEXT: s_branch .LBB21_2
;
; GFX11-LABEL: bitcast_v30f32_to_v15i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB21_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB21_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB21_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: .LBB21_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
@@ -13522,6 +13611,7 @@ define inreg <15 x i64> @bitcast_v30f32_to_v15i64_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB21_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -13743,6 +13833,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -13760,7 +13851,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -13774,10 +13865,13 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB23_4
+; SI-NEXT: s_cbranch_scc0 .LBB23_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB23_3
-; SI-NEXT: .LBB23_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB23_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB23_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
; SI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
; SI-NEXT: v_add_i32_e32 v26, vcc, 3, v26
@@ -13808,16 +13902,15 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; SI-NEXT: .LBB23_3: ; %end
+; SI-NEXT: .LBB23_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB23_4:
-; SI-NEXT: s_branch .LBB23_2
;
; VI-LABEL: bitcast_v15i64_to_v30f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -13835,7 +13928,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -13849,10 +13942,13 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB23_4
+; VI-NEXT: s_cbranch_scc0 .LBB23_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB23_3
-; VI-NEXT: .LBB23_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB23_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB23_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
; VI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
; VI-NEXT: v_add_u32_e32 v26, vcc, 3, v26
@@ -13883,16 +13979,15 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v3, vcc, 0, v3, vcc
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
-; VI-NEXT: .LBB23_3: ; %end
+; VI-NEXT: .LBB23_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB23_4:
-; VI-NEXT: s_branch .LBB23_2
;
; GFX9-LABEL: bitcast_v15i64_to_v30f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -13910,7 +14005,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -13924,10 +14019,13 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB23_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB23_3
-; GFX9-NEXT: .LBB23_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB23_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, 3, v28
; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, 0, v29, vcc
; GFX9-NEXT: v_add_co_u32_e32 v26, vcc, 3, v26
@@ -13958,43 +14056,41 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v3, vcc, 0, v3, vcc
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
-; GFX9-NEXT: .LBB23_3: ; %end
+; GFX9-NEXT: .LBB23_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB23_4:
-; GFX9-NEXT: s_branch .LBB23_2
;
; GFX11-LABEL: bitcast_v15i64_to_v30f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB23_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB23_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB23_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB23_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB23_3:
-; GFX11-NEXT: .LBB23_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB23_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
@@ -14033,6 +14129,7 @@ define inreg <30 x float> @bitcast_v15i64_to_v30f32_scalar(<15 x i64> inreg %a,
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
+; GFX11-NEXT: .LBB23_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14231,6 +14328,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -14248,7 +14346,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -14262,10 +14360,13 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB25_4
+; SI-NEXT: s_cbranch_scc0 .LBB25_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB25_3
-; SI-NEXT: .LBB25_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB25_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB25_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
; SI-NEXT: v_add_f32_e32 v28, 1.0, v28
; SI-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -14296,16 +14397,15 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; SI-NEXT: v_add_f32_e32 v2, 1.0, v2
; SI-NEXT: v_add_f32_e32 v1, 1.0, v1
; SI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; SI-NEXT: .LBB25_3: ; %end
+; SI-NEXT: .LBB25_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB25_4:
-; SI-NEXT: s_branch .LBB25_2
;
; VI-LABEL: bitcast_v30f32_to_v15f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -14323,7 +14423,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -14337,10 +14437,13 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB25_4
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_3
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
; VI-NEXT: v_add_f32_e32 v28, 1.0, v28
; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -14371,16 +14474,15 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; VI-NEXT: v_add_f32_e32 v2, 1.0, v2
; VI-NEXT: v_add_f32_e32 v1, 1.0, v1
; VI-NEXT: v_add_f32_e32 v0, 1.0, v0
-; VI-NEXT: .LBB25_3: ; %end
+; VI-NEXT: .LBB25_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_4:
-; VI-NEXT: s_branch .LBB25_2
;
; GFX9-LABEL: bitcast_v30f32_to_v15f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -14398,7 +14500,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -14412,10 +14514,13 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_3
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
; GFX9-NEXT: v_add_f32_e32 v28, 1.0, v28
; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
@@ -14446,43 +14551,41 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX9-NEXT: v_add_f32_e32 v2, 1.0, v2
; GFX9-NEXT: v_add_f32_e32 v1, 1.0, v1
; GFX9-NEXT: v_add_f32_e32 v0, 1.0, v0
-; GFX9-NEXT: .LBB25_3: ; %end
+; GFX9-NEXT: .LBB25_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_4:
-; GFX9-NEXT: s_branch .LBB25_2
;
; GFX11-LABEL: bitcast_v30f32_to_v15f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB25_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: .LBB25_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v29, 1.0, v29 :: v_dual_add_f32 v28, 1.0, v28
; GFX11-NEXT: v_dual_add_f32 v27, 1.0, v27 :: v_dual_add_f32 v26, 1.0, v26
; GFX11-NEXT: v_dual_add_f32 v25, 1.0, v25 :: v_dual_add_f32 v24, 1.0, v24
@@ -14498,6 +14601,7 @@ define inreg <15 x double> @bitcast_v30f32_to_v15f64_scalar(<30 x float> inreg %
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
+; GFX11-NEXT: .LBB25_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -14651,6 +14755,7 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -14679,13 +14784,16 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB27_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB27_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB27_3
-; SI-NEXT: .LBB27_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB27_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB27_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -14701,17 +14809,16 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; SI-NEXT: .LBB27_3: ; %end
+; SI-NEXT: .LBB27_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: v_mov_b32_e32 v17, v31
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB27_4:
-; SI-NEXT: s_branch .LBB27_2
;
; VI-LABEL: bitcast_v15f64_to_v30f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -14740,13 +14847,16 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB27_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_3
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -14762,17 +14872,16 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; VI-NEXT: .LBB27_3: ; %end
+; VI-NEXT: .LBB27_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: v_mov_b32_e32 v17, v31
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_4:
-; VI-NEXT: s_branch .LBB27_2
;
; GFX9-LABEL: bitcast_v15f64_to_v30f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -14801,13 +14910,16 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_3
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -14823,44 +14935,42 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
-; GFX9-NEXT: .LBB27_3: ; %end
+; GFX9-NEXT: .LBB27_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: v_mov_b32_e32 v17, v31
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_4:
-; GFX9-NEXT: s_branch .LBB27_2
;
; GFX11-LABEL: bitcast_v15f64_to_v30f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB27_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: .LBB27_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
@@ -14876,6 +14986,7 @@ define inreg <30 x float> @bitcast_v15f64_to_v30f32_scalar(<15 x double> inreg %
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
+; GFX11-NEXT: .LBB27_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -15867,12 +15978,13 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
-; SI-NEXT: v_mov_b32_e32 v30, s16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: v_mov_b32_e32 v29, s16
; SI-NEXT: v_mov_b32_e32 v28, s17
; SI-NEXT: v_mov_b32_e32 v33, s18
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v32, s19
-; SI-NEXT: v_mov_b32_e32 v29, s20
+; SI-NEXT: v_mov_b32_e32 v30, s20
; SI-NEXT: v_mov_b32_e32 v27, s21
; SI-NEXT: v_mov_b32_e32 v25, s22
; SI-NEXT: v_mov_b32_e32 v24, s23
@@ -15909,11 +16021,11 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_alignbit_b32 v53, v19, v20, 16
; SI-NEXT: v_alignbit_b32 v55, v21, v23, 16
; SI-NEXT: v_alignbit_b32 v41, v24, v25, 16
-; SI-NEXT: v_alignbit_b32 v44, v27, v29, 16
+; SI-NEXT: v_alignbit_b32 v44, v27, v30, 16
; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: v_alignbit_b32 v46, v32, v33, 16
; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_alignbit_b32 v56, v28, v30, 16
+; SI-NEXT: v_alignbit_b32 v56, v28, v29, 16
; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v16
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14
; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12
@@ -15936,11 +16048,11 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: s_cbranch_execnz .LBB29_3
; SI-NEXT: .LBB29_2: ; %cmp.true
; SI-NEXT: v_add_f32_e32 v28, 1.0, v28
-; SI-NEXT: v_add_f32_e32 v30, 1.0, v30
+; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
; SI-NEXT: v_add_f32_e32 v32, 1.0, v32
; SI-NEXT: v_add_f32_e32 v33, 1.0, v33
; SI-NEXT: v_add_f32_e32 v27, 1.0, v27
-; SI-NEXT: v_add_f32_e32 v29, 1.0, v29
+; SI-NEXT: v_add_f32_e32 v30, 1.0, v30
; SI-NEXT: v_add_f32_e32 v24, 1.0, v24
; SI-NEXT: v_add_f32_e32 v25, 1.0, v25
; SI-NEXT: v_add_f32_e32 v21, 1.0, v21
@@ -15977,11 +16089,11 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_alignbit_b32 v53, v19, v20, 16
; SI-NEXT: v_alignbit_b32 v55, v21, v23, 16
; SI-NEXT: v_alignbit_b32 v41, v24, v25, 16
-; SI-NEXT: v_alignbit_b32 v44, v27, v29, 16
+; SI-NEXT: v_alignbit_b32 v44, v27, v30, 16
; SI-NEXT: s_waitcnt expcnt(6)
; SI-NEXT: v_alignbit_b32 v46, v32, v33, 16
; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_alignbit_b32 v56, v28, v30, 16
+; SI-NEXT: v_alignbit_b32 v56, v28, v29, 16
; SI-NEXT: v_lshrrev_b32_e32 v37, 16, v16
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v14
; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v12
@@ -16002,31 +16114,31 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v28
; SI-NEXT: .LBB29_3: ; %end
-; SI-NEXT: v_and_b32_e32 v30, 0xffff, v30
+; SI-NEXT: v_and_b32_e32 v29, 0xffff, v29
; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v56
-; SI-NEXT: v_or_b32_e32 v30, v30, v56
-; SI-NEXT: buffer_store_dword v30, v0, s[0:3], 0 offen
+; SI-NEXT: v_or_b32_e32 v29, v29, v56
+; SI-NEXT: buffer_store_dword v29, v0, s[0:3], 0 offen
; SI-NEXT: v_and_b32_e32 v28, 0xffff, v28
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v60
-; SI-NEXT: v_or_b32_e32 v28, v28, v30
-; SI-NEXT: v_add_i32_e32 v30, vcc, 4, v0
-; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen
+; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v60
+; SI-NEXT: v_or_b32_e32 v28, v28, v29
+; SI-NEXT: v_add_i32_e32 v29, vcc, 4, v0
+; SI-NEXT: buffer_store_dword v28, v29, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v28, 0xffff, v33
-; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v46
-; SI-NEXT: v_or_b32_e32 v28, v28, v30
-; SI-NEXT: v_add_i32_e32 v30, vcc, 8, v0
-; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen
+; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v46
+; SI-NEXT: v_or_b32_e32 v28, v28, v29
+; SI-NEXT: v_add_i32_e32 v29, vcc, 8, v0
+; SI-NEXT: buffer_store_dword v28, v29, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_and_b32_e32 v28, 0xffff, v32
-; SI-NEXT: v_lshlrev_b32_e32 v30, 16, v59
-; SI-NEXT: v_or_b32_e32 v28, v28, v30
-; SI-NEXT: v_add_i32_e32 v30, vcc, 12, v0
-; SI-NEXT: buffer_store_dword v28, v30, s[0:3], 0 offen
+; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v59
+; SI-NEXT: v_or_b32_e32 v28, v28, v29
+; SI-NEXT: v_add_i32_e32 v29, vcc, 12, v0
+; SI-NEXT: buffer_store_dword v28, v29, s[0:3], 0 offen
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_and_b32_e32 v28, 0xffff, v29
+; SI-NEXT: v_and_b32_e32 v28, 0xffff, v30
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v44
; SI-NEXT: v_or_b32_e32 v28, v28, v29
; SI-NEXT: v_add_i32_e32 v29, vcc, 16, v0
@@ -16227,19 +16339,22 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr39
; SI-NEXT: ; implicit-def: $vgpr22
; SI-NEXT: ; implicit-def: $vgpr37
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v30f32_to_v60i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, s16
; VI-NEXT: v_mov_b32_e32 v18, s17
; VI-NEXT: v_mov_b32_e32 v17, s18
-; VI-NEXT: v_mov_b32_e32 v28, s19
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v27, s19
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v29, s20
-; VI-NEXT: v_mov_b32_e32 v27, s21
+; VI-NEXT: v_mov_b32_e32 v28, s21
; VI-NEXT: v_mov_b32_e32 v26, s22
; VI-NEXT: v_mov_b32_e32 v25, s23
; VI-NEXT: v_mov_b32_e32 v24, s24
@@ -16286,9 +16401,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v19
@@ -16318,9 +16433,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
; VI-NEXT: v_add_f32_e32 v26, 1.0, v26
-; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
-; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
; VI-NEXT: v_add_f32_e32 v28, 1.0, v28
+; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
+; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -16348,9 +16463,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v19
@@ -16362,11 +16477,11 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v34
; VI-NEXT: v_or_b32_sdwa v34, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v35
-; VI-NEXT: v_or_b32_sdwa v35, v28, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v35, v27, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v36
; VI-NEXT: v_or_b32_sdwa v36, v29, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v37
-; VI-NEXT: v_or_b32_sdwa v37, v27, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v37, v28, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v38
; VI-NEXT: v_or_b32_sdwa v38, v26, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v39
@@ -16476,19 +16591,22 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr40
; VI-NEXT: ; implicit-def: $vgpr55
; VI-NEXT: ; implicit-def: $vgpr54
-; VI-NEXT: s_branch .LBB29_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB29_2
+; VI-NEXT: s_branch .LBB29_3
;
; GFX9-LABEL: bitcast_v30f32_to_v60i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, s16
; GFX9-NEXT: v_mov_b32_e32 v18, s17
; GFX9-NEXT: v_mov_b32_e32 v17, s18
-; GFX9-NEXT: v_mov_b32_e32 v28, s19
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v27, s19
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v29, s20
-; GFX9-NEXT: v_mov_b32_e32 v27, s21
+; GFX9-NEXT: v_mov_b32_e32 v28, s21
; GFX9-NEXT: v_mov_b32_e32 v26, s22
; GFX9-NEXT: v_mov_b32_e32 v25, s23
; GFX9-NEXT: v_mov_b32_e32 v24, s24
@@ -16535,9 +16653,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v19
@@ -16567,9 +16685,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
; GFX9-NEXT: v_add_f32_e32 v26, 1.0, v26
-; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
-; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
; GFX9-NEXT: v_add_f32_e32 v28, 1.0, v28
+; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
+; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -16597,20 +16715,20 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v19
; GFX9-NEXT: .LBB29_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX9-NEXT: v_lshl_or_b32 v34, v34, 16, v17
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v28
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v27
; GFX9-NEXT: v_lshl_or_b32 v35, v35, 16, v17
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v29
; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v17
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v27
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v28
; GFX9-NEXT: v_lshl_or_b32 v37, v37, 16, v17
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v26
; GFX9-NEXT: v_lshl_or_b32 v38, v38, 16, v17
@@ -16725,7 +16843,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr55
; GFX9-NEXT: ; implicit-def: $vgpr54
-; GFX9-NEXT: s_branch .LBB29_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB29_2
+; GFX9-NEXT: s_branch .LBB29_3
;
; GFX11-LABEL: bitcast_v30f32_to_v60i16_scalar:
; GFX11: ; %bb.0:
@@ -16738,10 +16858,10 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
-; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB29_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -16756,9 +16876,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
@@ -16774,8 +16894,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB29_3
+; GFX11-NEXT: s_cbranch_execnz .LBB29_3
; GFX11-NEXT: .LBB29_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
@@ -16783,8 +16902,8 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
@@ -16804,9 +16923,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
@@ -16837,7 +16956,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v19
; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
@@ -16853,7 +16972,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
; GFX11-NEXT: v_lshl_or_b32 v13, v82, 16, v14
; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
@@ -16865,7 +16984,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
@@ -16881,7 +17000,7 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
@@ -16924,7 +17043,9 @@ define inreg <60 x i16> @bitcast_v30f32_to_v60i16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr52
; GFX11-NEXT: ; implicit-def: $vgpr51
; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB29_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB29_2
+; GFX11-NEXT: s_branch .LBB29_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -18342,6 +18463,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v35, v22
; SI-NEXT: v_mov_b32_e32 v36, v20
; SI-NEXT: v_mov_b32_e32 v37, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5
@@ -18373,7 +18495,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10
@@ -18676,7 +18798,9 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v30, v32
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v60i16_to_v30f32_scalar:
; VI: ; %bb.0:
@@ -18696,6 +18820,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -18712,7 +18837,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB31_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -18959,11 +19084,28 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB31_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB31_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB31_2
+; VI-NEXT: s_branch .LBB31_3
;
; GFX9-LABEL: bitcast_v60i16_to_v30f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -18980,21 +19122,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -19015,7 +19143,6 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -19030,6 +19157,7 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -19175,7 +19303,9 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB31_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB31_2
+; GFX9-NEXT: s_branch .LBB31_3
;
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v30f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -19220,41 +19350,41 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -19269,17 +19399,16 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-TRUE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -19293,24 +19422,24 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -19327,7 +19456,9 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB31_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB31_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-TRUE16-NEXT: s_branch .LBB31_3
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v30f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -19360,41 +19491,41 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB31_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -19409,17 +19540,16 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB31_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB31_3
; GFX11-FAKE16-NEXT: .LBB31_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -19433,24 +19563,24 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -19467,7 +19597,9 @@ define inreg <30 x float> @bitcast_v60i16_to_v30f32_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB31_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB31_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB31_2
+; GFX11-FAKE16-NEXT: s_branch .LBB31_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -20815,6 +20947,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s45, v1
; SI-NEXT: v_readfirstlane_b32 s44, v2
; SI-NEXT: v_readfirstlane_b32 s43, v3
@@ -20827,11 +20960,11 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_readfirstlane_b32 s12, v10
; SI-NEXT: v_readfirstlane_b32 s11, v11
; SI-NEXT: v_readfirstlane_b32 s10, v12
-; SI-NEXT: v_readfirstlane_b32 s8, v13
-; SI-NEXT: v_readfirstlane_b32 s7, v14
-; SI-NEXT: v_readfirstlane_b32 s6, v15
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v16
+; SI-NEXT: v_readfirstlane_b32 s9, v13
+; SI-NEXT: v_readfirstlane_b32 s8, v14
+; SI-NEXT: v_readfirstlane_b32 s7, v15
+; SI-NEXT: v_readfirstlane_b32 s6, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -20850,14 +20983,14 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB33_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: s_waitcnt expcnt(4)
; SI-NEXT: v_cvt_f32_f16_e32 v59, s4
-; SI-NEXT: s_lshr_b32 s4, s6, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v42, s4
; SI-NEXT: s_lshr_b32 s4, s7, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v54, s4
+; SI-NEXT: v_cvt_f32_f16_e32 v42, s4
; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v54, s4
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v50, s4
; SI-NEXT: s_lshr_b32 s4, s10, 16
; SI-NEXT: v_cvt_f32_f16_e32 v57, s4
@@ -20912,10 +21045,10 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: v_cvt_f32_f16_e32 v60, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v14, s9
-; SI-NEXT: v_cvt_f32_f16_e32 v38, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v45, s7
-; SI-NEXT: v_cvt_f32_f16_e32 v47, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v14, s6
+; SI-NEXT: v_cvt_f32_f16_e32 v38, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v45, s8
+; SI-NEXT: v_cvt_f32_f16_e32 v47, s9
; SI-NEXT: v_cvt_f32_f16_e32 v1, s10
; SI-NEXT: s_waitcnt expcnt(2)
; SI-NEXT: v_cvt_f32_f16_e32 v61, s11
@@ -20969,7 +21102,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:80 ; 4-byte Folded Spill
; SI-NEXT: v_add_f32_e64 v11, s22, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v4, 16, v16
-; SI-NEXT: v_add_f32_e64 v40, s6, 1.0
+; SI-NEXT: v_add_f32_e64 v40, s7, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v58, 16, v11
; SI-NEXT: v_lshrrev_b32_e32 v42, 16, v40
; SI-NEXT: v_cvt_f32_f16_e32 v38, v40
@@ -20983,7 +21116,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v19, s26, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v49, 16, v28
; SI-NEXT: v_lshrrev_b32_e32 v30, 16, v23
-; SI-NEXT: v_add_f32_e64 v48, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v48, s9, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v41, 16, v19
; SI-NEXT: v_lshrrev_b32_e32 v50, 16, v48
; SI-NEXT: v_cvt_f32_f16_e32 v47, v48
@@ -21032,8 +21165,8 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: v_add_f32_e64 v21, s15, 1.0
; SI-NEXT: v_add_f32_e64 v20, s14, 1.0
; SI-NEXT: v_add_f32_e64 v33, s11, 1.0
-; SI-NEXT: v_add_f32_e64 v52, s7, 1.0
-; SI-NEXT: v_add_f32_e64 v44, s9, 1.0
+; SI-NEXT: v_add_f32_e64 v52, s8, 1.0
+; SI-NEXT: v_add_f32_e64 v44, s6, 1.0
; SI-NEXT: v_lshrrev_b32_e32 v60, 16, v9
; SI-NEXT: v_lshrrev_b32_e32 v39, 16, v31
; SI-NEXT: v_lshrrev_b32_e32 v8, 16, v21
@@ -21375,19 +21508,22 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr42
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr59
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
+; SI-NEXT: s_branch .LBB33_3
;
; VI-LABEL: bitcast_v30f32_to_v60f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, s16
; VI-NEXT: v_mov_b32_e32 v18, s17
; VI-NEXT: v_mov_b32_e32 v17, s18
-; VI-NEXT: v_mov_b32_e32 v28, s19
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: v_mov_b32_e32 v27, s19
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v29, s20
-; VI-NEXT: v_mov_b32_e32 v27, s21
+; VI-NEXT: v_mov_b32_e32 v28, s21
; VI-NEXT: v_mov_b32_e32 v26, s22
; VI-NEXT: v_mov_b32_e32 v25, s23
; VI-NEXT: v_mov_b32_e32 v24, s24
@@ -21434,9 +21570,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v19
@@ -21466,9 +21602,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_add_f32_e32 v24, 1.0, v24
; VI-NEXT: v_add_f32_e32 v25, 1.0, v25
; VI-NEXT: v_add_f32_e32 v26, 1.0, v26
-; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
-; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
; VI-NEXT: v_add_f32_e32 v28, 1.0, v28
+; VI-NEXT: v_add_f32_e32 v29, 1.0, v29
+; VI-NEXT: v_add_f32_e32 v27, 1.0, v27
; VI-NEXT: v_add_f32_e32 v17, 1.0, v17
; VI-NEXT: v_add_f32_e32 v18, 1.0, v18
; VI-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -21496,9 +21632,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; VI-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; VI-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; VI-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; VI-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; VI-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; VI-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; VI-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; VI-NEXT: v_lshrrev_b32_e32 v32, 16, v19
@@ -21510,11 +21646,11 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: v_lshlrev_b32_e32 v18, 16, v34
; VI-NEXT: v_or_b32_sdwa v34, v17, v18 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v35
-; VI-NEXT: v_or_b32_sdwa v35, v28, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v35, v27, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v36
; VI-NEXT: v_or_b32_sdwa v36, v29, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v37
-; VI-NEXT: v_or_b32_sdwa v37, v27, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
+; VI-NEXT: v_or_b32_sdwa v37, v28, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v38
; VI-NEXT: v_or_b32_sdwa v38, v26, v17 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:WORD_0 src1_sel:DWORD
; VI-NEXT: v_lshlrev_b32_e32 v17, 16, v39
@@ -21624,19 +21760,22 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr40
; VI-NEXT: ; implicit-def: $vgpr55
; VI-NEXT: ; implicit-def: $vgpr54
-; VI-NEXT: s_branch .LBB33_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB33_2
+; VI-NEXT: s_branch .LBB33_3
;
; GFX9-LABEL: bitcast_v30f32_to_v60f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, s16
; GFX9-NEXT: v_mov_b32_e32 v18, s17
; GFX9-NEXT: v_mov_b32_e32 v17, s18
-; GFX9-NEXT: v_mov_b32_e32 v28, s19
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: v_mov_b32_e32 v27, s19
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v29, s20
-; GFX9-NEXT: v_mov_b32_e32 v27, s21
+; GFX9-NEXT: v_mov_b32_e32 v28, s21
; GFX9-NEXT: v_mov_b32_e32 v26, s22
; GFX9-NEXT: v_mov_b32_e32 v25, s23
; GFX9-NEXT: v_mov_b32_e32 v24, s24
@@ -21683,9 +21822,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v19
@@ -21715,9 +21854,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_add_f32_e32 v24, 1.0, v24
; GFX9-NEXT: v_add_f32_e32 v25, 1.0, v25
; GFX9-NEXT: v_add_f32_e32 v26, 1.0, v26
-; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
-; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
; GFX9-NEXT: v_add_f32_e32 v28, 1.0, v28
+; GFX9-NEXT: v_add_f32_e32 v29, 1.0, v29
+; GFX9-NEXT: v_add_f32_e32 v27, 1.0, v27
; GFX9-NEXT: v_add_f32_e32 v17, 1.0, v17
; GFX9-NEXT: v_add_f32_e32 v18, 1.0, v18
; GFX9-NEXT: v_add_f32_e32 v19, 1.0, v19
@@ -21745,20 +21884,20 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v48, 16, v24
; GFX9-NEXT: v_lshrrev_b32_e32 v39, 16, v25
; GFX9-NEXT: v_lshrrev_b32_e32 v38, 16, v26
-; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v27
+; GFX9-NEXT: v_lshrrev_b32_e32 v37, 16, v28
; GFX9-NEXT: v_lshrrev_b32_e32 v36, 16, v29
-; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v28
+; GFX9-NEXT: v_lshrrev_b32_e32 v35, 16, v27
; GFX9-NEXT: v_lshrrev_b32_e32 v34, 16, v17
; GFX9-NEXT: v_lshrrev_b32_e32 v33, 16, v18
; GFX9-NEXT: v_lshrrev_b32_e32 v32, 16, v19
; GFX9-NEXT: .LBB33_3: ; %end
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v17
; GFX9-NEXT: v_lshl_or_b32 v34, v34, 16, v17
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v28
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v27
; GFX9-NEXT: v_lshl_or_b32 v35, v35, 16, v17
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v29
; GFX9-NEXT: v_lshl_or_b32 v36, v36, 16, v17
-; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v27
+; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v28
; GFX9-NEXT: v_lshl_or_b32 v37, v37, 16, v17
; GFX9-NEXT: v_and_b32_e32 v17, 0xffff, v26
; GFX9-NEXT: v_lshl_or_b32 v38, v38, 16, v17
@@ -21873,7 +22012,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr55
; GFX9-NEXT: ; implicit-def: $vgpr54
-; GFX9-NEXT: s_branch .LBB33_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB33_2
+; GFX9-NEXT: s_branch .LBB33_3
;
; GFX11-LABEL: bitcast_v30f32_to_v60f16_scalar:
; GFX11: ; %bb.0:
@@ -21886,10 +22027,10 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v22, s20 :: v_dual_mov_b32 v21, s21
; GFX11-NEXT: v_dual_mov_b32 v20, s22 :: v_dual_mov_b32 v19, s23
; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v13, s26
-; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v17, s28 :: v_dual_mov_b32 v16, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: v_dual_mov_b32 v14, s25 :: v_dual_mov_b32 v17, s27
+; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v15, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB33_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -21904,9 +22045,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
@@ -21922,8 +22063,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v29
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB33_3
+; GFX11-NEXT: s_cbranch_execnz .LBB33_3
; GFX11-NEXT: .LBB33_2: ; %cmp.true
; GFX11-NEXT: v_dual_add_f32 v11, 1.0, v11 :: v_dual_add_f32 v10, 1.0, v10
; GFX11-NEXT: v_dual_add_f32 v9, 1.0, v9 :: v_dual_add_f32 v8, 1.0, v8
@@ -21931,8 +22071,8 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_dual_add_f32 v5, 1.0, v5 :: v_dual_add_f32 v4, 1.0, v4
; GFX11-NEXT: v_dual_add_f32 v3, 1.0, v3 :: v_dual_add_f32 v2, 1.0, v2
; GFX11-NEXT: v_dual_add_f32 v1, 1.0, v1 :: v_dual_add_f32 v0, 1.0, v0
-; GFX11-NEXT: v_dual_add_f32 v16, 1.0, v16 :: v_dual_add_f32 v17, 1.0, v17
-; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v14, 1.0, v14
+; GFX11-NEXT: v_dual_add_f32 v15, 1.0, v15 :: v_dual_add_f32 v16, 1.0, v16
+; GFX11-NEXT: v_dual_add_f32 v17, 1.0, v17 :: v_dual_add_f32 v14, 1.0, v14
; GFX11-NEXT: v_dual_add_f32 v13, 1.0, v13 :: v_dual_add_f32 v18, 1.0, v18
; GFX11-NEXT: v_dual_add_f32 v19, 1.0, v19 :: v_dual_add_f32 v20, 1.0, v20
; GFX11-NEXT: v_dual_add_f32 v21, 1.0, v21 :: v_dual_add_f32 v22, 1.0, v22
@@ -21952,9 +22092,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v67, 16, v2
; GFX11-NEXT: v_lshrrev_b32_e32 v68, 16, v1
; GFX11-NEXT: v_lshrrev_b32_e32 v69, 16, v0
-; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v16
-; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v17
-; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v70, 16, v15
+; GFX11-NEXT: v_lshrrev_b32_e32 v71, 16, v16
+; GFX11-NEXT: v_lshrrev_b32_e32 v80, 16, v17
; GFX11-NEXT: v_lshrrev_b32_e32 v81, 16, v13
; GFX11-NEXT: v_lshrrev_b32_e32 v82, 16, v14
; GFX11-NEXT: v_lshrrev_b32_e32 v12, 16, v18
@@ -21985,7 +22125,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v35, v35, 16, v19
; GFX11-NEXT: v_lshl_or_b32 v12, v12, 16, v18
; GFX11-NEXT: v_and_b32_e32 v17, 0xffff, v17
-; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v16
+; GFX11-NEXT: v_and_b32_e32 v18, 0xffff, v15
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v0
; GFX11-NEXT: v_lshl_or_b32 v19, v68, 16, v1
; GFX11-NEXT: v_and_b32_e32 v1, 0xffff, v3
@@ -22001,7 +22141,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_and_b32_e32 v20, 0xffff, v20
; GFX11-NEXT: v_lshl_or_b32 v13, v82, 16, v14
; GFX11-NEXT: v_lshl_or_b32 v14, v81, 16, v21
-; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v17
+; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v17
; GFX11-NEXT: v_lshl_or_b32 v17, v70, 16, v18
; GFX11-NEXT: v_lshl_or_b32 v18, v69, 16, v0
; GFX11-NEXT: v_and_b32_e32 v0, 0xffff, v2
@@ -22013,7 +22153,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v38, v38, 16, v28
; GFX11-NEXT: v_lshl_or_b32 v32, v32, 16, v22
; GFX11-NEXT: v_lshl_or_b32 v34, v34, 16, v20
-; GFX11-NEXT: v_and_b32_e32 v15, 0xffff, v15
+; GFX11-NEXT: v_and_b32_e32 v16, 0xffff, v16
; GFX11-NEXT: v_lshl_or_b32 v20, v67, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v22, v65, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v23, v64, 16, v3
@@ -22029,7 +22169,7 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: v_lshl_or_b32 v30, v83, 16, v24
; GFX11-NEXT: v_lshl_or_b32 v24, v55, 16, v4
; GFX11-NEXT: v_and_b32_e32 v4, 0xffff, v11
-; GFX11-NEXT: v_lshl_or_b32 v15, v80, 16, v15
+; GFX11-NEXT: v_lshl_or_b32 v16, v71, 16, v16
; GFX11-NEXT: v_lshl_or_b32 v25, v54, 16, v0
; GFX11-NEXT: v_lshl_or_b32 v27, v52, 16, v2
; GFX11-NEXT: v_lshl_or_b32 v28, v51, 16, v3
@@ -22072,7 +22212,9 @@ define inreg <60 x half> @bitcast_v30f32_to_v60f16_scalar(<30 x float> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr52
; GFX11-NEXT: ; implicit-def: $vgpr51
; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB33_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB33_2
+; GFX11-NEXT: s_branch .LBB33_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -23683,11 +23825,11 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
@@ -23701,83 +23843,92 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:56
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:52
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v39, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v9
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v12
; SI-NEXT: v_cvt_f16_f32_e32 v41, v15
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v55, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
; SI-NEXT: v_cvt_f16_f32_e32 v61, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
; SI-NEXT: v_cvt_f16_f32_e32 v21, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v26
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v12, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
-; SI-NEXT: v_cvt_f16_f32_e32 v12, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s18
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v9, s20
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v8, s24
; SI-NEXT: v_cvt_f16_f32_e32 v5, s27
-; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v7, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
-; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v50, s28
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v42
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v45
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -23786,260 +23937,240 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
-; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v59
; SI-NEXT: v_cvt_f16_f32_e32 v59, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB35_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v22, v18
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
-; SI-NEXT: v_mov_b32_e32 v33, v32
-; SI-NEXT: v_or_b32_e32 v10, v32, v10
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v13, v43, v13
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v49
-; SI-NEXT: v_or_b32_e32 v7, v37, v7
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v6, v50, v6
+; SI-NEXT: v_mov_b32_e32 v30, v50
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v36
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_mov_b32_e32 v52, v12
+; SI-NEXT: v_or_b32_e32 v0, v12, v0
+; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v2, v9, v2
+; SI-NEXT: v_or_b32_e32 v3, v10, v3
+; SI-NEXT: v_or_b32_e32 v4, v8, v4
+; SI-NEXT: v_or_b32_e32 v5, v7, v5
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v32
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v18, v22, v18
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v50
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v48
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
-; SI-NEXT: v_mov_b32_e32 v56, v34
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_or_b32_e32 v8, v49, v8
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_or_b32_e32 v9, v36, v9
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
+; SI-NEXT: v_or_b32_e32 v10, v37, v10
+; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v45, v35
; SI-NEXT: v_or_b32_e32 v11, v35, v11
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
-; SI-NEXT: v_or_b32_e32 v12, v62, v12
+; SI-NEXT: v_or_b32_e32 v12, v63, v12
+; SI-NEXT: v_mov_b32_e32 v43, v62
+; SI-NEXT: v_or_b32_e32 v13, v62, v13
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
; SI-NEXT: v_or_b32_e32 v14, v55, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v21, v51, v21
-; SI-NEXT: v_or_b32_e32 v22, v30, v22
-; SI-NEXT: v_or_b32_e32 v23, v31, v23
+; SI-NEXT: v_or_b32_e32 v16, v53, v16
+; SI-NEXT: v_or_b32_e32 v17, v51, v17
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_or_b32_e32 v20, v21, v20
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: v_or_b32_e32 v21, v22, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v23, v22
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; SI-NEXT: v_or_b32_e32 v23, v24, v23
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_or_b32_e32 v17, v32, v17
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v16, v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v35, v39
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v37
-; SI-NEXT: v_or_b32_e32 v9, v39, v9
-; SI-NEXT: v_mov_b32_e32 v36, v37
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v8, v38, v8
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v50
+; SI-NEXT: v_or_b32_e32 v7, v31, v7
+; SI-NEXT: v_mov_b32_e32 v35, v50
+; SI-NEXT: v_mov_b32_e32 v50, v30
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v54, v29
-; SI-NEXT: v_mov_b32_e32 v54, v32
; SI-NEXT: s_branch .LBB35_3
; SI-NEXT: .LBB35_2:
-; SI-NEXT: v_mov_b32_e32 v54, v53
-; SI-NEXT: v_mov_b32_e32 v53, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v30
-; SI-NEXT: v_mov_b32_e32 v49, v48
-; SI-NEXT: v_mov_b32_e32 v48, v31
+; SI-NEXT: v_mov_b32_e32 v52, v12
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v30, v50
-; SI-NEXT: v_mov_b32_e32 v50, v51
-; SI-NEXT: v_mov_b32_e32 v51, v52
-; SI-NEXT: v_mov_b32_e32 v52, v53
-; SI-NEXT: v_mov_b32_e32 v53, v54
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_mov_b32_e32 v56, v34
+; SI-NEXT: v_mov_b32_e32 v45, v35
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
+; SI-NEXT: v_mov_b32_e32 v43, v62
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: v_mov_b32_e32 v31, v48
-; SI-NEXT: v_mov_b32_e32 v48, v49
; SI-NEXT: .LBB35_3: ; %Flow
; SI-NEXT: v_mov_b32_e32 v32, v33
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: v_mov_b32_e32 v61, v40
-; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB35_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v50
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v31
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v45
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
; SI-NEXT: v_mov_b32_e32 v55, v42
; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v43
+; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v48
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v31
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v51
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
@@ -24047,42 +24178,48 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
+; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
+; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -24095,65 +24232,65 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v35
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v57
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v47
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v44
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -24165,14 +24302,14 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
@@ -24180,9 +24317,9 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v17, v19, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
@@ -24192,7 +24329,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
@@ -24204,32 +24341,39 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_or_b32_e32 v19, v20, v19
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v52
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v50
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v30
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_or_b32_e32 v23, v25, v23
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
@@ -24314,6 +24458,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -24330,7 +24475,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB35_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -24534,11 +24679,28 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB35_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB35_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB35_2
+; VI-NEXT: s_branch .LBB35_3
;
; GFX9-LABEL: bitcast_v60f16_to_v30f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -24555,21 +24717,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -24590,7 +24738,6 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -24605,6 +24752,7 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -24752,7 +24900,9 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB35_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB35_2
+; GFX9-NEXT: s_branch .LBB35_3
;
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v30f32_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -24797,41 +24947,41 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -24846,17 +24996,16 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-TRUE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -24870,24 +25019,24 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -24904,7 +25053,9 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB35_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB35_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-TRUE16-NEXT: s_branch .LBB35_3
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v30f32_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -24937,41 +25088,41 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB35_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -24986,17 +25137,16 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB35_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB35_3
; GFX11-FAKE16-NEXT: .LBB35_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -25010,24 +25160,24 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -25044,7 +25194,9 @@ define inreg <30 x float> @bitcast_v60f16_to_v30f32_scalar(<60 x half> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB35_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB35_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB35_2
+; GFX11-FAKE16-NEXT: s_branch .LBB35_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25265,6 +25417,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -25282,7 +25435,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v15, v1
; SI-NEXT: v_mov_b32_e32 v14, v0
; SI-NEXT: v_mov_b32_e32 v0, s16
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: v_mov_b32_e32 v3, s19
@@ -25296,10 +25449,13 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_mov_b32_e32 v11, s27
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB37_4
+; SI-NEXT: s_cbranch_scc0 .LBB37_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB37_3
-; SI-NEXT: .LBB37_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB37_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB37_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_i32_e32 v0, vcc, 3, v0
; SI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; SI-NEXT: v_add_i32_e32 v2, vcc, 3, v2
@@ -25330,16 +25486,15 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; SI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
; SI-NEXT: v_add_i32_e32 v28, vcc, 3, v28
; SI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
-; SI-NEXT: .LBB37_3: ; %end
+; SI-NEXT: .LBB37_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB37_4:
-; SI-NEXT: s_branch .LBB37_2
;
; VI-LABEL: bitcast_v15i64_to_v15f64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -25357,7 +25512,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v15, v1
; VI-NEXT: v_mov_b32_e32 v14, v0
; VI-NEXT: v_mov_b32_e32 v0, s16
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: v_mov_b32_e32 v3, s19
@@ -25371,10 +25526,13 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_mov_b32_e32 v11, s27
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB37_4
+; VI-NEXT: s_cbranch_scc0 .LBB37_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB37_3
-; VI-NEXT: .LBB37_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB37_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB37_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_u32_e32 v0, vcc, 3, v0
; VI-NEXT: v_addc_u32_e32 v1, vcc, 0, v1, vcc
; VI-NEXT: v_add_u32_e32 v2, vcc, 3, v2
@@ -25405,16 +25563,15 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; VI-NEXT: v_addc_u32_e32 v27, vcc, 0, v27, vcc
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
; VI-NEXT: v_addc_u32_e32 v29, vcc, 0, v29, vcc
-; VI-NEXT: .LBB37_3: ; %end
+; VI-NEXT: .LBB37_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB37_4:
-; VI-NEXT: s_branch .LBB37_2
;
; GFX9-LABEL: bitcast_v15i64_to_v15f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -25432,7 +25589,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v15, v1
; GFX9-NEXT: v_mov_b32_e32 v14, v0
; GFX9-NEXT: v_mov_b32_e32 v0, s16
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: v_mov_b32_e32 v3, s19
@@ -25446,10 +25603,13 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v11, s27
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
+; GFX9-NEXT: s_cbranch_scc0 .LBB37_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB37_3
-; GFX9-NEXT: .LBB37_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB37_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_co_u32_e32 v0, vcc, 3, v0
; GFX9-NEXT: v_addc_co_u32_e32 v1, vcc, 0, v1, vcc
; GFX9-NEXT: v_add_co_u32_e32 v2, vcc, 3, v2
@@ -25480,43 +25640,41 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX9-NEXT: v_addc_co_u32_e32 v27, vcc, 0, v27, vcc
; GFX9-NEXT: v_add_co_u32_e32 v28, vcc, 3, v28
; GFX9-NEXT: v_addc_co_u32_e32 v29, vcc, 0, v29, vcc
-; GFX9-NEXT: .LBB37_3: ; %end
+; GFX9-NEXT: .LBB37_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB37_4:
-; GFX9-NEXT: s_branch .LBB37_2
;
; GFX11-LABEL: bitcast_v15i64_to_v15f64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB37_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB37_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB37_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB37_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB37_3:
-; GFX11-NEXT: .LBB37_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB37_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_co_u32 v0, vcc_lo, v0, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(SKIP_1) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v1, null, 0, v1, vcc_lo
@@ -25555,6 +25713,7 @@ define inreg <15 x double> @bitcast_v15i64_to_v15f64_scalar(<15 x i64> inreg %a,
; GFX11-NEXT: v_add_co_u32 v28, vcc_lo, v28, 3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_add_co_ci_u32_e64 v29, null, 0, v29, vcc_lo
+; GFX11-NEXT: .LBB37_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -25708,6 +25867,7 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v29, v15
; SI-NEXT: v_mov_b32_e32 v28, v14
; SI-NEXT: v_mov_b32_e32 v27, v13
@@ -25736,13 +25896,16 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v9, s25
; SI-NEXT: v_mov_b32_e32 v10, s26
; SI-NEXT: v_mov_b32_e32 v11, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v12, s28
; SI-NEXT: v_mov_b32_e32 v13, s29
-; SI-NEXT: s_cbranch_scc0 .LBB39_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB39_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB39_3
-; SI-NEXT: .LBB39_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB39_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB39_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; SI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; SI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -25758,17 +25921,16 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; SI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; SI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; SI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; SI-NEXT: .LBB39_3: ; %end
+; SI-NEXT: .LBB39_4: ; %end
; SI-NEXT: v_mov_b32_e32 v16, v30
; SI-NEXT: v_mov_b32_e32 v17, v31
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB39_4:
-; SI-NEXT: s_branch .LBB39_2
;
; VI-LABEL: bitcast_v15f64_to_v15i64_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v29, v15
; VI-NEXT: v_mov_b32_e32 v28, v14
; VI-NEXT: v_mov_b32_e32 v27, v13
@@ -25797,13 +25959,16 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v9, s25
; VI-NEXT: v_mov_b32_e32 v10, s26
; VI-NEXT: v_mov_b32_e32 v11, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v12, s28
; VI-NEXT: v_mov_b32_e32 v13, s29
-; VI-NEXT: s_cbranch_scc0 .LBB39_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB39_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB39_3
-; VI-NEXT: .LBB39_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB39_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB39_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; VI-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; VI-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -25819,17 +25984,16 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; VI-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; VI-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; VI-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; VI-NEXT: .LBB39_3: ; %end
+; VI-NEXT: .LBB39_4: ; %end
; VI-NEXT: v_mov_b32_e32 v16, v30
; VI-NEXT: v_mov_b32_e32 v17, v31
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB39_4:
-; VI-NEXT: s_branch .LBB39_2
;
; GFX9-LABEL: bitcast_v15f64_to_v15i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v29, v15
; GFX9-NEXT: v_mov_b32_e32 v28, v14
; GFX9-NEXT: v_mov_b32_e32 v27, v13
@@ -25858,13 +26022,16 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v9, s25
; GFX9-NEXT: v_mov_b32_e32 v10, s26
; GFX9-NEXT: v_mov_b32_e32 v11, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v12, s28
; GFX9-NEXT: v_mov_b32_e32 v13, s29
-; GFX9-NEXT: s_cbranch_scc0 .LBB39_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB39_3
-; GFX9-NEXT: .LBB39_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB39_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX9-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX9-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -25880,44 +26047,42 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX9-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX9-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
-; GFX9-NEXT: .LBB39_3: ; %end
+; GFX9-NEXT: .LBB39_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v16, v30
; GFX9-NEXT: v_mov_b32_e32 v17, v31
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB39_4:
-; GFX9-NEXT: s_branch .LBB39_2
;
; GFX11-LABEL: bitcast_v15f64_to_v15i64_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; GFX11-NEXT: v_dual_mov_b32 v16, v12 :: v_dual_mov_b32 v29, v11
-; GFX11-NEXT: v_dual_mov_b32 v28, v10 :: v_dual_mov_b32 v27, v9
-; GFX11-NEXT: v_dual_mov_b32 v26, v8 :: v_dual_mov_b32 v25, v7
+; GFX11-NEXT: v_dual_mov_b32 v15, v12 :: v_dual_mov_b32 v28, v10
+; GFX11-NEXT: v_dual_mov_b32 v29, v11 :: v_dual_mov_b32 v26, v8
+; GFX11-NEXT: v_dual_mov_b32 v27, v9 :: v_dual_mov_b32 v24, v6
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
-; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v16
-; GFX11-NEXT: v_dual_mov_b32 v24, v6 :: v_dual_mov_b32 v23, v5
-; GFX11-NEXT: v_dual_mov_b32 v22, v4 :: v_dual_mov_b32 v21, v3
-; GFX11-NEXT: v_dual_mov_b32 v20, v2 :: v_dual_mov_b32 v19, v1
-; GFX11-NEXT: v_dual_mov_b32 v18, v0 :: v_dual_mov_b32 v1, s1
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v3, s3
-; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v5, s17
-; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v7, s19
-; GFX11-NEXT: v_dual_mov_b32 v6, s18 :: v_dual_mov_b32 v9, s21
-; GFX11-NEXT: v_dual_mov_b32 v8, s20 :: v_dual_mov_b32 v11, s23
-; GFX11-NEXT: v_dual_mov_b32 v10, s22 :: v_dual_mov_b32 v13, s25
-; GFX11-NEXT: v_dual_mov_b32 v12, s24 :: v_dual_mov_b32 v15, s27
-; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: v_mov_b32_e32 v16, s28
+; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v15
+; GFX11-NEXT: v_dual_mov_b32 v25, v7 :: v_dual_mov_b32 v22, v4
+; GFX11-NEXT: v_dual_mov_b32 v23, v5 :: v_dual_mov_b32 v20, v2
+; GFX11-NEXT: v_dual_mov_b32 v21, v3 :: v_dual_mov_b32 v18, v0
+; GFX11-NEXT: v_dual_mov_b32 v19, v1 :: v_dual_mov_b32 v0, s0
+; GFX11-NEXT: v_dual_mov_b32 v1, s1 :: v_dual_mov_b32 v2, s2
+; GFX11-NEXT: v_dual_mov_b32 v3, s3 :: v_dual_mov_b32 v4, s16
+; GFX11-NEXT: v_dual_mov_b32 v5, s17 :: v_dual_mov_b32 v6, s18
+; GFX11-NEXT: v_dual_mov_b32 v7, s19 :: v_dual_mov_b32 v8, s20
+; GFX11-NEXT: v_dual_mov_b32 v9, s21 :: v_dual_mov_b32 v10, s22
+; GFX11-NEXT: v_dual_mov_b32 v11, s23 :: v_dual_mov_b32 v12, s24
+; GFX11-NEXT: v_dual_mov_b32 v13, s25 :: v_dual_mov_b32 v14, s26
+; GFX11-NEXT: v_dual_mov_b32 v15, s27 :: v_dual_mov_b32 v16, s28
+; GFX11-NEXT: v_mov_b32_e32 v17, s29
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB39_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
-; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB39_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccz .LBB39_4
-; GFX11-NEXT: ; %bb.2: ; %end
-; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB39_3:
-; GFX11-NEXT: .LBB39_4: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[0:1], v[0:1], 1.0
; GFX11-NEXT: v_add_f64 v[2:3], v[2:3], 1.0
; GFX11-NEXT: v_add_f64 v[4:5], v[4:5], 1.0
@@ -25933,6 +26098,7 @@ define inreg <15 x i64> @bitcast_v15f64_to_v15i64_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_add_f64 v[24:25], v[24:25], 1.0
; GFX11-NEXT: v_add_f64 v[26:27], v[26:27], 1.0
; GFX11-NEXT: v_add_f64 v[28:29], v[28:29], 1.0
+; GFX11-NEXT: .LBB39_4: ; %end
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -26970,6 +27136,7 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s45, v1
; SI-NEXT: v_readfirstlane_b32 s44, v2
; SI-NEXT: v_readfirstlane_b32 s43, v3
@@ -26985,8 +27152,8 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: v_readfirstlane_b32 s9, v13
; SI-NEXT: v_readfirstlane_b32 s8, v14
; SI-NEXT: v_readfirstlane_b32 s7, v15
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s6, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v1, s7
@@ -27324,7 +27491,9 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr47
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr46
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v15i64_to_v60i16_scalar:
; VI: ; %bb.0:
@@ -27337,8 +27506,9 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_writelane_b32 v30, s34, 2
; VI-NEXT: v_writelane_b32 v30, s35, 3
; VI-NEXT: v_writelane_b32 v30, s36, 4
-; VI-NEXT: v_writelane_b32 v30, s37, 5
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: v_writelane_b32 v30, s37, 5
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v30, s38, 6
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_readfirstlane_b32 s44, v1
@@ -27354,14 +27524,14 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_readfirstlane_b32 s10, v11
; VI-NEXT: v_readfirstlane_b32 s9, v12
; VI-NEXT: v_readfirstlane_b32 s8, v13
-; VI-NEXT: v_readfirstlane_b32 s6, v14
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v15
+; VI-NEXT: v_readfirstlane_b32 s7, v14
+; VI-NEXT: v_readfirstlane_b32 s6, v15
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v30, s39, 7
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -27392,8 +27562,8 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: s_lshr_b32 s39, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB41_3
; VI-NEXT: .LBB41_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -27422,8 +27592,8 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -27537,12 +27707,12 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s44, s56, 16
; VI-NEXT: s_or_b32 s8, s8, s44
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s44, s47, 16
-; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_lshl_b32 s44, s47, 16
; VI-NEXT: s_or_b32 s7, s7, s44
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -27571,8 +27741,8 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v25, s10
; VI-NEXT: v_mov_b32_e32 v26, s9
; VI-NEXT: v_mov_b32_e32 v27, s8
-; VI-NEXT: v_mov_b32_e32 v28, s6
-; VI-NEXT: v_mov_b32_e32 v29, s7
+; VI-NEXT: v_mov_b32_e32 v28, s7
+; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: v_readlane_b32 s39, v30, 7
; VI-NEXT: v_readlane_b32 s38, v30, 6
; VI-NEXT: v_readlane_b32 s37, v30, 5
@@ -27617,7 +27787,9 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr56
; VI-NEXT: ; implicit-def: $sgpr47
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v15i64_to_v60i16_scalar:
; GFX9: ; %bb.0:
@@ -27626,45 +27798,46 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: v_writelane_b32 v30, s30, 0
-; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: v_writelane_b32 v30, s34, 2
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
-; GFX9-NEXT: v_readfirstlane_b32 s44, v14
+; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s45, v15
+; GFX9-NEXT: v_writelane_b32 v30, s34, 2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s44, v13
+; GFX9-NEXT: v_readfirstlane_b32 s45, v14
+; GFX9-NEXT: v_readfirstlane_b32 s6, v15
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -27681,22 +27854,22 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB41_3
; GFX9-NEXT: .LBB41_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s44, s44, 3
-; GFX9-NEXT: s_addc_u32 s45, s45, 0
-; GFX9-NEXT: s_add_u32 s42, s42, 3
-; GFX9-NEXT: s_addc_u32 s43, s43, 0
-; GFX9-NEXT: s_add_u32 s40, s40, 3
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s45, s45, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s43, s43, 3
+; GFX9-NEXT: s_addc_u32 s44, s44, 0
+; GFX9-NEXT: s_add_u32 s41, s41, 3
+; GFX9-NEXT: s_addc_u32 s42, s42, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s40, s40, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -27711,22 +27884,22 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -27756,22 +27929,22 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s88
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s79
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s78
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s77
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s76
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s75
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s77
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s76
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s75
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s74
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s46
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -27786,22 +27959,22 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
-; GFX9-NEXT: v_mov_b32_e32 v28, s42
-; GFX9-NEXT: v_mov_b32_e32 v29, s43
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s42
+; GFX9-NEXT: v_mov_b32_e32 v28, s43
+; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: v_readlane_b32 s35, v30, 3
; GFX9-NEXT: v_readlane_b32 s34, v30, 2
; GFX9-NEXT: v_readlane_b32 s31, v30, 1
@@ -27842,7 +28015,9 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v15i64_to_v60i16_scalar:
; GFX11: ; %bb.0:
@@ -27857,16 +28032,16 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
; GFX11-NEXT: v_readfirstlane_b32 s11, v7
; GFX11-NEXT: v_readfirstlane_b32 s12, v8
-; GFX11-NEXT: v_readfirstlane_b32 s13, v9
+; GFX11-NEXT: v_readfirstlane_b32 s14, v9
; GFX11-NEXT: v_readfirstlane_b32 s15, v10
-; GFX11-NEXT: v_readfirstlane_b32 s14, v11
-; GFX11-NEXT: s_mov_b32 s94, 0
+; GFX11-NEXT: v_readfirstlane_b32 s13, v11
+; GFX11-NEXT: s_mov_b32 s94, -1
; GFX11-NEXT: s_and_b32 s40, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -27894,13 +28069,12 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: s_lshr_b32 s91, s2, 16
; GFX11-NEXT: s_lshr_b32 s92, s1, 16
; GFX11-NEXT: s_lshr_b32 s93, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s15, s15, 3
-; GFX11-NEXT: s_addc_u32 s14, s14, 0
-; GFX11-NEXT: s_add_u32 s12, s12, 3
; GFX11-NEXT: s_addc_u32 s13, s13, 0
+; GFX11-NEXT: s_add_u32 s12, s12, 3
+; GFX11-NEXT: s_addc_u32 s14, s14, 0
; GFX11-NEXT: s_add_u32 s10, s10, 3
; GFX11-NEXT: s_addc_u32 s11, s11, 0
; GFX11-NEXT: s_add_u32 s8, s8, 3
@@ -27927,9 +28101,9 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -27986,9 +28160,9 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s45
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s44
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s42
+; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s15, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s40
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -28002,8 +28176,8 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s13
-; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
+; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s14
+; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s13
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: ; implicit-def: $sgpr93
@@ -28036,7 +28210,9 @@ define inreg <60 x i16> @bitcast_v15i64_to_v60i16_scalar(<15 x i64> inreg %a, i3
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -29454,6 +29630,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: v_mov_b32_e32 v35, v22
; SI-NEXT: v_mov_b32_e32 v36, v20
; SI-NEXT: v_mov_b32_e32 v37, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5
@@ -29485,7 +29662,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10
@@ -29788,7 +29965,9 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v30, v32
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v60i16_to_v15i64_scalar:
; VI: ; %bb.0:
@@ -29808,6 +29987,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -29824,7 +30004,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -30071,11 +30251,28 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB43_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
+; VI-NEXT: s_branch .LBB43_3
;
; GFX9-LABEL: bitcast_v60i16_to_v15i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -30092,21 +30289,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -30127,7 +30310,6 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -30142,6 +30324,7 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -30287,7 +30470,9 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
+; GFX9-NEXT: s_branch .LBB43_3
;
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -30332,41 +30517,41 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -30381,17 +30566,16 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-TRUE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -30405,24 +30589,24 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -30439,7 +30623,9 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB43_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB43_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-TRUE16-NEXT: s_branch .LBB43_3
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -30472,41 +30658,41 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB43_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -30521,17 +30707,16 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB43_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB43_3
; GFX11-FAKE16-NEXT: .LBB43_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -30545,24 +30730,24 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -30579,7 +30764,9 @@ define inreg <15 x i64> @bitcast_v60i16_to_v15i64_scalar(<60 x i16> inreg %a, i3
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB43_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB43_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB43_2
+; GFX11-FAKE16-NEXT: s_branch .LBB43_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -31974,6 +32161,7 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s44, v1
; SI-NEXT: v_readfirstlane_b32 s45, v2
; SI-NEXT: v_readfirstlane_b32 s42, v3
@@ -31986,11 +32174,11 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI-NEXT: v_readfirstlane_b32 s13, v10
; SI-NEXT: v_readfirstlane_b32 s10, v11
; SI-NEXT: v_readfirstlane_b32 s11, v12
-; SI-NEXT: v_readfirstlane_b32 s7, v13
-; SI-NEXT: v_readfirstlane_b32 s8, v14
+; SI-NEXT: v_readfirstlane_b32 s8, v13
+; SI-NEXT: v_readfirstlane_b32 s9, v14
; SI-NEXT: v_readfirstlane_b32 s6, v15
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
-; SI-NEXT: v_readfirstlane_b32 s9, v16
+; SI-NEXT: v_readfirstlane_b32 s7, v16
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
@@ -32006,13 +32194,13 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI-NEXT: buffer_store_dword v60, off, s[0:3], s32 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_lshr_b32 s4, s9, 16
+; SI-NEXT: s_lshr_b32 s4, s7, 16
; SI-NEXT: v_cvt_f32_f16_e32 v1, s4
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: v_cvt_f32_f16_e32 v2, s4
-; SI-NEXT: s_lshr_b32 s4, s8, 16
+; SI-NEXT: s_lshr_b32 s4, s9, 16
; SI-NEXT: v_cvt_f32_f16_e32 v3, s4
-; SI-NEXT: s_lshr_b32 s4, s7, 16
+; SI-NEXT: s_lshr_b32 s4, s8, 16
; SI-NEXT: v_cvt_f32_f16_e32 v5, s4
; SI-NEXT: s_lshr_b32 s4, s11, 16
; SI-NEXT: v_cvt_f32_f16_e32 v7, s4
@@ -32070,10 +32258,10 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI-NEXT: s_lshr_b32 s4, s16, 16
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v60, s4
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v14, s10
; SI-NEXT: v_cvt_f32_f16_e32 v16, s13
@@ -32154,18 +32342,18 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI-NEXT: s_addc_u32 s11, s11, 0
; SI-NEXT: s_lshr_b32 s92, s10, 16
; SI-NEXT: s_lshr_b32 s93, s11, 16
-; SI-NEXT: s_add_u32 s7, s7, 3
-; SI-NEXT: s_addc_u32 s8, s8, 0
-; SI-NEXT: s_lshr_b32 s94, s7, 16
-; SI-NEXT: s_lshr_b32 s95, s8, 16
-; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_add_u32 s8, s8, 3
; SI-NEXT: s_addc_u32 s9, s9, 0
+; SI-NEXT: s_lshr_b32 s94, s8, 16
+; SI-NEXT: s_lshr_b32 s95, s9, 16
+; SI-NEXT: s_add_u32 s6, s6, 3
+; SI-NEXT: s_addc_u32 s7, s7, 0
; SI-NEXT: s_lshr_b32 vcc_lo, s6, 16
-; SI-NEXT: s_lshr_b32 vcc_hi, s9, 16
-; SI-NEXT: v_cvt_f32_f16_e32 v4, s9
+; SI-NEXT: s_lshr_b32 vcc_hi, s7, 16
+; SI-NEXT: v_cvt_f32_f16_e32 v4, s7
; SI-NEXT: v_cvt_f32_f16_e32 v6, s6
-; SI-NEXT: v_cvt_f32_f16_e32 v8, s8
-; SI-NEXT: v_cvt_f32_f16_e32 v10, s7
+; SI-NEXT: v_cvt_f32_f16_e32 v8, s9
+; SI-NEXT: v_cvt_f32_f16_e32 v10, s8
; SI-NEXT: v_cvt_f32_f16_e32 v12, s11
; SI-NEXT: v_cvt_f32_f16_e32 v14, s10
; SI-NEXT: v_cvt_f32_f16_e32 v16, s13
@@ -32512,7 +32700,9 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr1
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v15i64_to_v60f16_scalar:
; VI: ; %bb.0:
@@ -32525,8 +32715,9 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: v_writelane_b32 v30, s34, 2
; VI-NEXT: v_writelane_b32 v30, s35, 3
; VI-NEXT: v_writelane_b32 v30, s36, 4
-; VI-NEXT: v_writelane_b32 v30, s37, 5
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: v_writelane_b32 v30, s37, 5
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_writelane_b32 v30, s38, 6
; VI-NEXT: v_readfirstlane_b32 s45, v0
; VI-NEXT: v_readfirstlane_b32 s44, v1
@@ -32542,14 +32733,14 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: v_readfirstlane_b32 s10, v11
; VI-NEXT: v_readfirstlane_b32 s9, v12
; VI-NEXT: v_readfirstlane_b32 s8, v13
-; VI-NEXT: v_readfirstlane_b32 s6, v14
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
-; VI-NEXT: v_readfirstlane_b32 s7, v15
+; VI-NEXT: v_readfirstlane_b32 s7, v14
+; VI-NEXT: v_readfirstlane_b32 s6, v15
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_writelane_b32 v30, s39, 7
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -32580,8 +32771,8 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: s_lshr_b32 s39, s16, 16
; VI-NEXT: s_cbranch_execnz .LBB45_3
; VI-NEXT: .LBB45_2: ; %cmp.true
-; VI-NEXT: s_add_u32 s6, s6, 3
-; VI-NEXT: s_addc_u32 s7, s7, 0
+; VI-NEXT: s_add_u32 s7, s7, 3
+; VI-NEXT: s_addc_u32 s6, s6, 0
; VI-NEXT: s_add_u32 s9, s9, 3
; VI-NEXT: s_addc_u32 s8, s8, 0
; VI-NEXT: s_add_u32 s11, s11, 3
@@ -32610,8 +32801,8 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: s_addc_u32 s19, s19, 0
; VI-NEXT: s_add_u32 s16, s16, 3
; VI-NEXT: s_addc_u32 s17, s17, 0
-; VI-NEXT: s_lshr_b32 s46, s7, 16
-; VI-NEXT: s_lshr_b32 s47, s6, 16
+; VI-NEXT: s_lshr_b32 s46, s6, 16
+; VI-NEXT: s_lshr_b32 s47, s7, 16
; VI-NEXT: s_lshr_b32 s56, s8, 16
; VI-NEXT: s_lshr_b32 s57, s9, 16
; VI-NEXT: s_lshr_b32 s58, s10, 16
@@ -32725,12 +32916,12 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: s_and_b32 s8, 0xffff, s8
; VI-NEXT: s_lshl_b32 s44, s56, 16
; VI-NEXT: s_or_b32 s8, s8, s44
-; VI-NEXT: s_and_b32 s6, 0xffff, s6
-; VI-NEXT: s_lshl_b32 s44, s47, 16
-; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: s_and_b32 s7, 0xffff, s7
-; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_lshl_b32 s44, s47, 16
; VI-NEXT: s_or_b32 s7, s7, s44
+; VI-NEXT: s_and_b32 s6, 0xffff, s6
+; VI-NEXT: s_lshl_b32 s44, s46, 16
+; VI-NEXT: s_or_b32 s6, s6, s44
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: v_mov_b32_e32 v2, s16
@@ -32759,8 +32950,8 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v25, s10
; VI-NEXT: v_mov_b32_e32 v26, s9
; VI-NEXT: v_mov_b32_e32 v27, s8
-; VI-NEXT: v_mov_b32_e32 v28, s6
-; VI-NEXT: v_mov_b32_e32 v29, s7
+; VI-NEXT: v_mov_b32_e32 v28, s7
+; VI-NEXT: v_mov_b32_e32 v29, s6
; VI-NEXT: v_readlane_b32 s39, v30, 7
; VI-NEXT: v_readlane_b32 s38, v30, 6
; VI-NEXT: v_readlane_b32 s37, v30, 5
@@ -32805,7 +32996,9 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; VI-NEXT: ; implicit-def: $sgpr56
; VI-NEXT: ; implicit-def: $sgpr47
; VI-NEXT: ; implicit-def: $sgpr46
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v15i64_to_v60f16_scalar:
; GFX9: ; %bb.0:
@@ -32814,45 +33007,46 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: buffer_store_dword v30, off, s[0:3], s32 ; 4-byte Folded Spill
; GFX9-NEXT: s_mov_b64 exec, s[4:5]
; GFX9-NEXT: v_writelane_b32 v30, s30, 0
-; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: v_writelane_b32 v30, s34, 2
-; GFX9-NEXT: v_readfirstlane_b32 s6, v0
-; GFX9-NEXT: v_readfirstlane_b32 s7, v1
-; GFX9-NEXT: v_readfirstlane_b32 s8, v2
-; GFX9-NEXT: v_readfirstlane_b32 s9, v3
-; GFX9-NEXT: v_readfirstlane_b32 s10, v4
-; GFX9-NEXT: v_readfirstlane_b32 s11, v5
-; GFX9-NEXT: v_readfirstlane_b32 s12, v6
-; GFX9-NEXT: v_readfirstlane_b32 s13, v7
-; GFX9-NEXT: v_readfirstlane_b32 s14, v8
-; GFX9-NEXT: v_readfirstlane_b32 s15, v9
-; GFX9-NEXT: v_readfirstlane_b32 s40, v10
-; GFX9-NEXT: v_readfirstlane_b32 s41, v11
-; GFX9-NEXT: v_readfirstlane_b32 s42, v12
-; GFX9-NEXT: v_readfirstlane_b32 s43, v13
-; GFX9-NEXT: v_readfirstlane_b32 s44, v14
+; GFX9-NEXT: v_writelane_b32 v30, s31, 1
; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
-; GFX9-NEXT: v_readfirstlane_b32 s45, v15
+; GFX9-NEXT: v_writelane_b32 v30, s34, 2
+; GFX9-NEXT: v_readfirstlane_b32 s7, v0
+; GFX9-NEXT: v_readfirstlane_b32 s8, v1
+; GFX9-NEXT: v_readfirstlane_b32 s9, v2
+; GFX9-NEXT: v_readfirstlane_b32 s10, v3
+; GFX9-NEXT: v_readfirstlane_b32 s11, v4
+; GFX9-NEXT: v_readfirstlane_b32 s12, v5
+; GFX9-NEXT: v_readfirstlane_b32 s13, v6
+; GFX9-NEXT: v_readfirstlane_b32 s14, v7
+; GFX9-NEXT: v_readfirstlane_b32 s15, v8
+; GFX9-NEXT: v_readfirstlane_b32 s40, v9
+; GFX9-NEXT: v_readfirstlane_b32 s41, v10
+; GFX9-NEXT: v_readfirstlane_b32 s42, v11
+; GFX9-NEXT: v_readfirstlane_b32 s43, v12
+; GFX9-NEXT: v_readfirstlane_b32 s44, v13
+; GFX9-NEXT: v_readfirstlane_b32 s45, v14
+; GFX9-NEXT: v_readfirstlane_b32 s6, v15
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_writelane_b32 v30, s35, 3
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -32869,22 +33063,22 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s35, s16, 16
; GFX9-NEXT: s_cbranch_execnz .LBB45_3
; GFX9-NEXT: .LBB45_2: ; %cmp.true
-; GFX9-NEXT: s_add_u32 s44, s44, 3
-; GFX9-NEXT: s_addc_u32 s45, s45, 0
-; GFX9-NEXT: s_add_u32 s42, s42, 3
-; GFX9-NEXT: s_addc_u32 s43, s43, 0
-; GFX9-NEXT: s_add_u32 s40, s40, 3
-; GFX9-NEXT: s_addc_u32 s41, s41, 0
-; GFX9-NEXT: s_add_u32 s14, s14, 3
-; GFX9-NEXT: s_addc_u32 s15, s15, 0
-; GFX9-NEXT: s_add_u32 s12, s12, 3
-; GFX9-NEXT: s_addc_u32 s13, s13, 0
-; GFX9-NEXT: s_add_u32 s10, s10, 3
-; GFX9-NEXT: s_addc_u32 s11, s11, 0
-; GFX9-NEXT: s_add_u32 s8, s8, 3
-; GFX9-NEXT: s_addc_u32 s9, s9, 0
-; GFX9-NEXT: s_add_u32 s6, s6, 3
-; GFX9-NEXT: s_addc_u32 s7, s7, 0
+; GFX9-NEXT: s_add_u32 s45, s45, 3
+; GFX9-NEXT: s_addc_u32 s6, s6, 0
+; GFX9-NEXT: s_add_u32 s43, s43, 3
+; GFX9-NEXT: s_addc_u32 s44, s44, 0
+; GFX9-NEXT: s_add_u32 s41, s41, 3
+; GFX9-NEXT: s_addc_u32 s42, s42, 0
+; GFX9-NEXT: s_add_u32 s15, s15, 3
+; GFX9-NEXT: s_addc_u32 s40, s40, 0
+; GFX9-NEXT: s_add_u32 s13, s13, 3
+; GFX9-NEXT: s_addc_u32 s14, s14, 0
+; GFX9-NEXT: s_add_u32 s11, s11, 3
+; GFX9-NEXT: s_addc_u32 s12, s12, 0
+; GFX9-NEXT: s_add_u32 s9, s9, 3
+; GFX9-NEXT: s_addc_u32 s10, s10, 0
+; GFX9-NEXT: s_add_u32 s7, s7, 3
+; GFX9-NEXT: s_addc_u32 s8, s8, 0
; GFX9-NEXT: s_add_u32 s28, s28, 3
; GFX9-NEXT: s_addc_u32 s29, s29, 0
; GFX9-NEXT: s_add_u32 s26, s26, 3
@@ -32899,22 +33093,22 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: s_addc_u32 s19, s19, 0
; GFX9-NEXT: s_add_u32 s16, s16, 3
; GFX9-NEXT: s_addc_u32 s17, s17, 0
-; GFX9-NEXT: s_lshr_b32 s46, s45, 16
-; GFX9-NEXT: s_lshr_b32 s47, s44, 16
-; GFX9-NEXT: s_lshr_b32 s56, s43, 16
-; GFX9-NEXT: s_lshr_b32 s57, s42, 16
-; GFX9-NEXT: s_lshr_b32 s58, s41, 16
-; GFX9-NEXT: s_lshr_b32 s59, s40, 16
-; GFX9-NEXT: s_lshr_b32 s60, s15, 16
-; GFX9-NEXT: s_lshr_b32 s61, s14, 16
-; GFX9-NEXT: s_lshr_b32 s62, s13, 16
-; GFX9-NEXT: s_lshr_b32 s63, s12, 16
-; GFX9-NEXT: s_lshr_b32 s72, s11, 16
-; GFX9-NEXT: s_lshr_b32 s73, s10, 16
-; GFX9-NEXT: s_lshr_b32 s74, s9, 16
-; GFX9-NEXT: s_lshr_b32 s75, s8, 16
-; GFX9-NEXT: s_lshr_b32 s76, s7, 16
-; GFX9-NEXT: s_lshr_b32 s77, s6, 16
+; GFX9-NEXT: s_lshr_b32 s46, s6, 16
+; GFX9-NEXT: s_lshr_b32 s47, s45, 16
+; GFX9-NEXT: s_lshr_b32 s56, s44, 16
+; GFX9-NEXT: s_lshr_b32 s57, s43, 16
+; GFX9-NEXT: s_lshr_b32 s58, s42, 16
+; GFX9-NEXT: s_lshr_b32 s59, s41, 16
+; GFX9-NEXT: s_lshr_b32 s60, s40, 16
+; GFX9-NEXT: s_lshr_b32 s61, s15, 16
+; GFX9-NEXT: s_lshr_b32 s62, s14, 16
+; GFX9-NEXT: s_lshr_b32 s63, s13, 16
+; GFX9-NEXT: s_lshr_b32 s72, s12, 16
+; GFX9-NEXT: s_lshr_b32 s73, s11, 16
+; GFX9-NEXT: s_lshr_b32 s74, s10, 16
+; GFX9-NEXT: s_lshr_b32 s75, s9, 16
+; GFX9-NEXT: s_lshr_b32 s76, s8, 16
+; GFX9-NEXT: s_lshr_b32 s77, s7, 16
; GFX9-NEXT: s_lshr_b32 s78, s29, 16
; GFX9-NEXT: s_lshr_b32 s79, s28, 16
; GFX9-NEXT: s_lshr_b32 s88, s27, 16
@@ -32944,22 +33138,22 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s25, s27, s88
; GFX9-NEXT: s_pack_ll_b32_b16 s26, s28, s79
; GFX9-NEXT: s_pack_ll_b32_b16 s27, s29, s78
-; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s77
-; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s76
-; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s75
-; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s74
-; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s73
-; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s72
-; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s63
-; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s62
-; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s61
-; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s60
-; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s59
-; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s58
-; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s57
-; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s56
-; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s47
-; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s46
+; GFX9-NEXT: s_pack_ll_b32_b16 s7, s7, s77
+; GFX9-NEXT: s_pack_ll_b32_b16 s8, s8, s76
+; GFX9-NEXT: s_pack_ll_b32_b16 s9, s9, s75
+; GFX9-NEXT: s_pack_ll_b32_b16 s10, s10, s74
+; GFX9-NEXT: s_pack_ll_b32_b16 s11, s11, s73
+; GFX9-NEXT: s_pack_ll_b32_b16 s12, s12, s72
+; GFX9-NEXT: s_pack_ll_b32_b16 s13, s13, s63
+; GFX9-NEXT: s_pack_ll_b32_b16 s14, s14, s62
+; GFX9-NEXT: s_pack_ll_b32_b16 s15, s15, s61
+; GFX9-NEXT: s_pack_ll_b32_b16 s28, s40, s60
+; GFX9-NEXT: s_pack_ll_b32_b16 s29, s41, s59
+; GFX9-NEXT: s_pack_ll_b32_b16 s40, s42, s58
+; GFX9-NEXT: s_pack_ll_b32_b16 s41, s43, s57
+; GFX9-NEXT: s_pack_ll_b32_b16 s42, s44, s56
+; GFX9-NEXT: s_pack_ll_b32_b16 s43, s45, s47
+; GFX9-NEXT: s_pack_ll_b32_b16 s6, s6, s46
; GFX9-NEXT: v_mov_b32_e32 v0, s4
; GFX9-NEXT: v_mov_b32_e32 v1, s5
; GFX9-NEXT: v_mov_b32_e32 v2, s16
@@ -32974,22 +33168,22 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v11, s25
; GFX9-NEXT: v_mov_b32_e32 v12, s26
; GFX9-NEXT: v_mov_b32_e32 v13, s27
-; GFX9-NEXT: v_mov_b32_e32 v14, s6
-; GFX9-NEXT: v_mov_b32_e32 v15, s7
-; GFX9-NEXT: v_mov_b32_e32 v16, s8
-; GFX9-NEXT: v_mov_b32_e32 v17, s9
-; GFX9-NEXT: v_mov_b32_e32 v18, s10
-; GFX9-NEXT: v_mov_b32_e32 v19, s11
-; GFX9-NEXT: v_mov_b32_e32 v20, s12
-; GFX9-NEXT: v_mov_b32_e32 v21, s13
-; GFX9-NEXT: v_mov_b32_e32 v22, s14
-; GFX9-NEXT: v_mov_b32_e32 v23, s15
-; GFX9-NEXT: v_mov_b32_e32 v24, s28
-; GFX9-NEXT: v_mov_b32_e32 v25, s29
-; GFX9-NEXT: v_mov_b32_e32 v26, s40
-; GFX9-NEXT: v_mov_b32_e32 v27, s41
-; GFX9-NEXT: v_mov_b32_e32 v28, s42
-; GFX9-NEXT: v_mov_b32_e32 v29, s43
+; GFX9-NEXT: v_mov_b32_e32 v14, s7
+; GFX9-NEXT: v_mov_b32_e32 v15, s8
+; GFX9-NEXT: v_mov_b32_e32 v16, s9
+; GFX9-NEXT: v_mov_b32_e32 v17, s10
+; GFX9-NEXT: v_mov_b32_e32 v18, s11
+; GFX9-NEXT: v_mov_b32_e32 v19, s12
+; GFX9-NEXT: v_mov_b32_e32 v20, s13
+; GFX9-NEXT: v_mov_b32_e32 v21, s14
+; GFX9-NEXT: v_mov_b32_e32 v22, s15
+; GFX9-NEXT: v_mov_b32_e32 v23, s28
+; GFX9-NEXT: v_mov_b32_e32 v24, s29
+; GFX9-NEXT: v_mov_b32_e32 v25, s40
+; GFX9-NEXT: v_mov_b32_e32 v26, s41
+; GFX9-NEXT: v_mov_b32_e32 v27, s42
+; GFX9-NEXT: v_mov_b32_e32 v28, s43
+; GFX9-NEXT: v_mov_b32_e32 v29, s6
; GFX9-NEXT: v_readlane_b32 s35, v30, 3
; GFX9-NEXT: v_readlane_b32 s34, v30, 2
; GFX9-NEXT: v_readlane_b32 s31, v30, 1
@@ -33030,7 +33224,9 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX9-NEXT: ; implicit-def: $sgpr56
; GFX9-NEXT: ; implicit-def: $sgpr47
; GFX9-NEXT: ; implicit-def: $sgpr46
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v15i64_to_v60f16_scalar:
; GFX11: ; %bb.0:
@@ -33045,16 +33241,16 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: v_readfirstlane_b32 s10, v6
; GFX11-NEXT: v_readfirstlane_b32 s11, v7
; GFX11-NEXT: v_readfirstlane_b32 s12, v8
-; GFX11-NEXT: v_readfirstlane_b32 s13, v9
+; GFX11-NEXT: v_readfirstlane_b32 s14, v9
; GFX11-NEXT: v_readfirstlane_b32 s15, v10
-; GFX11-NEXT: v_readfirstlane_b32 s14, v11
-; GFX11-NEXT: s_mov_b32 s94, 0
+; GFX11-NEXT: v_readfirstlane_b32 s13, v11
+; GFX11-NEXT: s_mov_b32 s94, -1
; GFX11-NEXT: s_and_b32 s40, vcc_lo, exec_lo
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -33082,13 +33278,12 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: s_lshr_b32 s91, s2, 16
; GFX11-NEXT: s_lshr_b32 s92, s1, 16
; GFX11-NEXT: s_lshr_b32 s93, s0, 16
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_u32 s15, s15, 3
-; GFX11-NEXT: s_addc_u32 s14, s14, 0
-; GFX11-NEXT: s_add_u32 s12, s12, 3
; GFX11-NEXT: s_addc_u32 s13, s13, 0
+; GFX11-NEXT: s_add_u32 s12, s12, 3
+; GFX11-NEXT: s_addc_u32 s14, s14, 0
; GFX11-NEXT: s_add_u32 s10, s10, 3
; GFX11-NEXT: s_addc_u32 s11, s11, 0
; GFX11-NEXT: s_add_u32 s8, s8, 3
@@ -33115,9 +33310,9 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: s_addc_u32 s3, s3, 0
; GFX11-NEXT: s_add_u32 s0, s0, 3
; GFX11-NEXT: s_addc_u32 s1, s1, 0
-; GFX11-NEXT: s_lshr_b32 s40, s14, 16
+; GFX11-NEXT: s_lshr_b32 s40, s13, 16
; GFX11-NEXT: s_lshr_b32 s41, s15, 16
-; GFX11-NEXT: s_lshr_b32 s42, s13, 16
+; GFX11-NEXT: s_lshr_b32 s42, s14, 16
; GFX11-NEXT: s_lshr_b32 s43, s12, 16
; GFX11-NEXT: s_lshr_b32 s44, s11, 16
; GFX11-NEXT: s_lshr_b32 s45, s10, 16
@@ -33174,9 +33369,9 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: s_pack_ll_b32_b16 s10, s10, s45
; GFX11-NEXT: s_pack_ll_b32_b16 s11, s11, s44
; GFX11-NEXT: s_pack_ll_b32_b16 s12, s12, s43
-; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s42
+; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s42
; GFX11-NEXT: s_pack_ll_b32_b16 s15, s15, s41
-; GFX11-NEXT: s_pack_ll_b32_b16 s14, s14, s40
+; GFX11-NEXT: s_pack_ll_b32_b16 s13, s13, s40
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
; GFX11-NEXT: v_dual_mov_b32 v4, s16 :: v_dual_mov_b32 v5, s17
@@ -33190,8 +33385,8 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: v_dual_mov_b32 v20, s6 :: v_dual_mov_b32 v21, s7
; GFX11-NEXT: v_dual_mov_b32 v22, s8 :: v_dual_mov_b32 v23, s9
; GFX11-NEXT: v_dual_mov_b32 v24, s10 :: v_dual_mov_b32 v25, s11
-; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s13
-; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s14
+; GFX11-NEXT: v_dual_mov_b32 v26, s12 :: v_dual_mov_b32 v27, s14
+; GFX11-NEXT: v_dual_mov_b32 v28, s15 :: v_dual_mov_b32 v29, s13
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: ; implicit-def: $sgpr93
@@ -33224,7 +33419,9 @@ define inreg <60 x half> @bitcast_v15i64_to_v60f16_scalar(<15 x i64> inreg %a, i
; GFX11-NEXT: ; implicit-def: $sgpr42
; GFX11-NEXT: ; implicit-def: $sgpr41
; GFX11-NEXT: ; implicit-def: $sgpr40
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s94
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -34835,11 +35032,11 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
@@ -34853,83 +35050,92 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:56
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:52
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v39, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v9
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v12
; SI-NEXT: v_cvt_f16_f32_e32 v41, v15
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v55, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
; SI-NEXT: v_cvt_f16_f32_e32 v61, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
; SI-NEXT: v_cvt_f16_f32_e32 v21, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v26
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v12, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
-; SI-NEXT: v_cvt_f16_f32_e32 v12, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s18
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v9, s20
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v8, s24
; SI-NEXT: v_cvt_f16_f32_e32 v5, s27
-; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v7, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
-; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v50, s28
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v42
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v45
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -34938,260 +35144,240 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
-; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v59
; SI-NEXT: v_cvt_f16_f32_e32 v59, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB47_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v22, v18
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
-; SI-NEXT: v_mov_b32_e32 v33, v32
-; SI-NEXT: v_or_b32_e32 v10, v32, v10
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v13, v43, v13
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v49
-; SI-NEXT: v_or_b32_e32 v7, v37, v7
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v6, v50, v6
+; SI-NEXT: v_mov_b32_e32 v30, v50
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v36
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_mov_b32_e32 v52, v12
+; SI-NEXT: v_or_b32_e32 v0, v12, v0
+; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v2, v9, v2
+; SI-NEXT: v_or_b32_e32 v3, v10, v3
+; SI-NEXT: v_or_b32_e32 v4, v8, v4
+; SI-NEXT: v_or_b32_e32 v5, v7, v5
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v32
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v18, v22, v18
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v50
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v48
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
-; SI-NEXT: v_mov_b32_e32 v56, v34
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_or_b32_e32 v8, v49, v8
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_or_b32_e32 v9, v36, v9
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
+; SI-NEXT: v_or_b32_e32 v10, v37, v10
+; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v45, v35
; SI-NEXT: v_or_b32_e32 v11, v35, v11
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
-; SI-NEXT: v_or_b32_e32 v12, v62, v12
+; SI-NEXT: v_or_b32_e32 v12, v63, v12
+; SI-NEXT: v_mov_b32_e32 v43, v62
+; SI-NEXT: v_or_b32_e32 v13, v62, v13
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
; SI-NEXT: v_or_b32_e32 v14, v55, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v21, v51, v21
-; SI-NEXT: v_or_b32_e32 v22, v30, v22
-; SI-NEXT: v_or_b32_e32 v23, v31, v23
+; SI-NEXT: v_or_b32_e32 v16, v53, v16
+; SI-NEXT: v_or_b32_e32 v17, v51, v17
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_or_b32_e32 v20, v21, v20
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: v_or_b32_e32 v21, v22, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v23, v22
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; SI-NEXT: v_or_b32_e32 v23, v24, v23
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_or_b32_e32 v17, v32, v17
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v16, v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v35, v39
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v37
-; SI-NEXT: v_or_b32_e32 v9, v39, v9
-; SI-NEXT: v_mov_b32_e32 v36, v37
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v8, v38, v8
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v50
+; SI-NEXT: v_or_b32_e32 v7, v31, v7
+; SI-NEXT: v_mov_b32_e32 v35, v50
+; SI-NEXT: v_mov_b32_e32 v50, v30
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v54, v29
-; SI-NEXT: v_mov_b32_e32 v54, v32
; SI-NEXT: s_branch .LBB47_3
; SI-NEXT: .LBB47_2:
-; SI-NEXT: v_mov_b32_e32 v54, v53
-; SI-NEXT: v_mov_b32_e32 v53, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v30
-; SI-NEXT: v_mov_b32_e32 v49, v48
-; SI-NEXT: v_mov_b32_e32 v48, v31
+; SI-NEXT: v_mov_b32_e32 v52, v12
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v30, v50
-; SI-NEXT: v_mov_b32_e32 v50, v51
-; SI-NEXT: v_mov_b32_e32 v51, v52
-; SI-NEXT: v_mov_b32_e32 v52, v53
-; SI-NEXT: v_mov_b32_e32 v53, v54
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_mov_b32_e32 v56, v34
+; SI-NEXT: v_mov_b32_e32 v45, v35
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
+; SI-NEXT: v_mov_b32_e32 v43, v62
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: v_mov_b32_e32 v31, v48
-; SI-NEXT: v_mov_b32_e32 v48, v49
; SI-NEXT: .LBB47_3: ; %Flow
; SI-NEXT: v_mov_b32_e32 v32, v33
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: v_mov_b32_e32 v61, v40
-; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB47_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v50
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v31
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v45
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
; SI-NEXT: v_mov_b32_e32 v55, v42
; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v43
+; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v48
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v31
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v51
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
@@ -35199,42 +35385,48 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
+; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
+; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -35247,65 +35439,65 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v35
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v57
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v47
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v44
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -35317,14 +35509,14 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
@@ -35332,9 +35524,9 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v17, v19, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
@@ -35344,7 +35536,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
@@ -35356,32 +35548,39 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_or_b32_e32 v19, v20, v19
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v52
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v50
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v30
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_or_b32_e32 v23, v25, v23
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
@@ -35466,6 +35665,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -35482,7 +35682,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -35686,11 +35886,28 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB47_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v60f16_to_v15i64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -35707,21 +35924,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -35742,7 +35945,6 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -35757,6 +35959,7 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -35904,7 +36107,9 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
+; GFX9-NEXT: s_branch .LBB47_3
;
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15i64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -35949,41 +36154,41 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -35998,17 +36203,16 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-TRUE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -36022,24 +36226,24 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -36056,7 +36260,9 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB47_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB47_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-TRUE16-NEXT: s_branch .LBB47_3
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15i64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -36089,41 +36295,41 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB47_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -36138,17 +36344,16 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB47_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB47_3
; GFX11-FAKE16-NEXT: .LBB47_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -36162,24 +36367,24 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -36196,7 +36401,9 @@ define inreg <15 x i64> @bitcast_v60f16_to_v15i64_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB47_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB47_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB47_2
+; GFX11-FAKE16-NEXT: s_branch .LBB47_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -37142,6 +37349,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
+; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v27, s16
; SI-NEXT: v_mov_b32_e32 v28, s17
; SI-NEXT: v_mov_b32_e32 v29, s18
@@ -37154,9 +37362,9 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; SI-NEXT: v_mov_b32_e32 v22, s25
; SI-NEXT: v_mov_b32_e32 v19, s26
; SI-NEXT: v_mov_b32_e32 v20, s27
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
; SI-NEXT: v_mov_b32_e32 v17, s28
; SI-NEXT: v_mov_b32_e32 v18, s29
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:48 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
@@ -37487,12 +37695,15 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; SI-NEXT: ; implicit-def: $vgpr48
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; implicit-def: $vgpr38
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v15f64_to_v60i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, s16
; VI-NEXT: v_mov_b32_e32 v18, s17
; VI-NEXT: v_mov_b32_e32 v29, s18
@@ -37505,9 +37716,9 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; VI-NEXT: v_mov_b32_e32 v24, s25
; VI-NEXT: v_mov_b32_e32 v21, s26
; VI-NEXT: v_mov_b32_e32 v22, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, s28
; VI-NEXT: v_mov_b32_e32 v20, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -37721,12 +37932,15 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; VI-NEXT: ; implicit-def: $vgpr40
; VI-NEXT: ; implicit-def: $vgpr55
; VI-NEXT: ; implicit-def: $vgpr54
-; VI-NEXT: s_branch .LBB49_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB49_2
+; VI-NEXT: s_branch .LBB49_3
;
; GFX9-LABEL: bitcast_v15f64_to_v60i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, s16
; GFX9-NEXT: v_mov_b32_e32 v18, s17
; GFX9-NEXT: v_mov_b32_e32 v29, s18
@@ -37739,9 +37953,9 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v24, s25
; GFX9-NEXT: v_mov_b32_e32 v21, s26
; GFX9-NEXT: v_mov_b32_e32 v22, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, s28
; GFX9-NEXT: v_mov_b32_e32 v20, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -37955,7 +38169,9 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr55
; GFX9-NEXT: ; implicit-def: $vgpr54
-; GFX9-NEXT: s_branch .LBB49_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB49_2
+; GFX9-NEXT: s_branch .LBB49_3
;
; GFX11-LABEL: bitcast_v15f64_to_v60i16_scalar:
; GFX11: ; %bb.0:
@@ -37970,8 +38186,8 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB49_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -38004,8 +38220,7 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB49_3
+; GFX11-NEXT: s_cbranch_execnz .LBB49_3
; GFX11-NEXT: .LBB49_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
@@ -38154,7 +38369,9 @@ define inreg <60 x i16> @bitcast_v15f64_to_v60i16_scalar(<15 x double> inreg %a,
; GFX11-NEXT: ; implicit-def: $vgpr52
; GFX11-NEXT: ; implicit-def: $vgpr51
; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB49_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB49_2
+; GFX11-NEXT: s_branch .LBB49_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -39572,6 +39789,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; SI-NEXT: v_mov_b32_e32 v35, v22
; SI-NEXT: v_mov_b32_e32 v36, v20
; SI-NEXT: v_mov_b32_e32 v37, v18
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: v_lshlrev_b32_e32 v32, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v5
@@ -39603,7 +39821,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v0
; SI-NEXT: v_lshlrev_b32_e32 v57, 16, v2
; SI-NEXT: v_lshlrev_b32_e32 v56, 16, v4
-; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_and_b64 s[6:7], vcc, exec
; SI-NEXT: v_lshlrev_b32_e32 v47, 16, v6
; SI-NEXT: v_lshlrev_b32_e32 v62, 16, v8
; SI-NEXT: v_lshlrev_b32_e32 v61, 16, v10
@@ -39906,7 +40124,9 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
; SI-NEXT: v_mov_b32_e32 v30, v32
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v60i16_to_v15f64_scalar:
; VI: ; %bb.0:
@@ -39926,6 +40146,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -39942,7 +40163,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB51_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -40189,11 +40410,28 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB51_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB51_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB51_2
+; VI-NEXT: s_branch .LBB51_3
;
; GFX9-LABEL: bitcast_v60i16_to_v15f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -40210,21 +40448,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -40245,7 +40469,6 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -40260,6 +40483,7 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -40405,7 +40629,9 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB51_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB51_2
+; GFX9-NEXT: s_branch .LBB51_3
;
; GFX11-TRUE16-LABEL: bitcast_v60i16_to_v15f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -40450,41 +40676,41 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -40499,17 +40725,16 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-TRUE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -40523,24 +40748,24 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-TRUE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-TRUE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -40557,7 +40782,9 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB51_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB51_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-TRUE16-NEXT: s_branch .LBB51_3
;
; GFX11-FAKE16-LABEL: bitcast_v60i16_to_v15f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -40590,41 +40817,41 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB51_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -40639,17 +40866,16 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB51_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB51_3
; GFX11-FAKE16-NEXT: .LBB51_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -40663,24 +40889,24 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v0, s4, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v3, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s4, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s5, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s6, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s7, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s8, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s9, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s11, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s12, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s13, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s14, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s16, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s17, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s18, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v4, s5, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v5, s6, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v6, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v7, s8, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v8, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v9, s10, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v10, s11, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, s12, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v12, s13, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v13, s14, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v14, s15, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v15, s0, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v16, s16, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v17, s17, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v18, v18, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v19, v19, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v20, v20, 3 op_sel_hi:[1,0]
@@ -40697,7 +40923,9 @@ define inreg <15 x double> @bitcast_v60i16_to_v15f64_scalar(<60 x i16> inreg %a,
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB51_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB51_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB51_2
+; GFX11-FAKE16-NEXT: s_branch .LBB51_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -41991,6 +42219,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v17
+; SI-NEXT: s_and_b64 s[44:45], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s42, v1
; SI-NEXT: v_readfirstlane_b32 s43, v2
; SI-NEXT: v_readfirstlane_b32 s40, v3
@@ -42006,8 +42235,8 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; SI-NEXT: v_readfirstlane_b32 s6, v13
; SI-NEXT: v_readfirstlane_b32 s7, v14
; SI-NEXT: v_readfirstlane_b32 s4, v15
-; SI-NEXT: s_and_b64 s[44:45], vcc, exec
; SI-NEXT: v_readfirstlane_b32 s5, v16
+; SI-NEXT: s_mov_b64 s[44:45], -1
; SI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -42542,12 +42771,15 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; SI-NEXT: ; implicit-def: $vgpr36
; SI-NEXT: ; implicit-def: $vgpr14
; SI-NEXT: ; implicit-def: $vgpr59
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[44:45]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v15f64_to_v60f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v17, s16
; VI-NEXT: v_mov_b32_e32 v18, s17
; VI-NEXT: v_mov_b32_e32 v29, s18
@@ -42560,9 +42792,9 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; VI-NEXT: v_mov_b32_e32 v24, s25
; VI-NEXT: v_mov_b32_e32 v21, s26
; VI-NEXT: v_mov_b32_e32 v22, s27
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v19, s28
; VI-NEXT: v_mov_b32_e32 v20, s29
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -42776,12 +43008,15 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; VI-NEXT: ; implicit-def: $vgpr40
; VI-NEXT: ; implicit-def: $vgpr55
; VI-NEXT: ; implicit-def: $vgpr54
-; VI-NEXT: s_branch .LBB53_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB53_2
+; VI-NEXT: s_branch .LBB53_3
;
; GFX9-LABEL: bitcast_v15f64_to_v60f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v17, s16
; GFX9-NEXT: v_mov_b32_e32 v18, s17
; GFX9-NEXT: v_mov_b32_e32 v29, s18
@@ -42794,9 +43029,9 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v24, s25
; GFX9-NEXT: v_mov_b32_e32 v21, s26
; GFX9-NEXT: v_mov_b32_e32 v22, s27
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_mov_b32_e32 v19, s28
; GFX9-NEXT: v_mov_b32_e32 v20, s29
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:44 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:40 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:36 ; 4-byte Folded Spill
@@ -43010,7 +43245,9 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX9-NEXT: ; implicit-def: $vgpr40
; GFX9-NEXT: ; implicit-def: $vgpr55
; GFX9-NEXT: ; implicit-def: $vgpr54
-; GFX9-NEXT: s_branch .LBB53_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB53_2
+; GFX9-NEXT: s_branch .LBB53_3
;
; GFX11-LABEL: bitcast_v15f64_to_v60f16_scalar:
; GFX11: ; %bb.0:
@@ -43025,8 +43262,8 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-NEXT: v_dual_mov_b32 v18, s24 :: v_dual_mov_b32 v19, s25
; GFX11-NEXT: v_dual_mov_b32 v14, s26 :: v_dual_mov_b32 v15, s27
; GFX11-NEXT: v_dual_mov_b32 v16, s28 :: v_dual_mov_b32 v17, s29
-; GFX11-NEXT: s_mov_b32 s0, 0
-; GFX11-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB53_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: v_lshrrev_b32_e32 v50, 16, v11
@@ -43059,8 +43296,7 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-NEXT: v_lshrrev_b32_e32 v38, 16, v28
; GFX11-NEXT: v_lshrrev_b32_e32 v37, 16, v31
; GFX11-NEXT: v_lshrrev_b32_e32 v36, 16, v30
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-NEXT: s_cbranch_vccnz .LBB53_3
+; GFX11-NEXT: s_cbranch_execnz .LBB53_3
; GFX11-NEXT: .LBB53_2: ; %cmp.true
; GFX11-NEXT: v_add_f64 v[10:11], v[10:11], 1.0
; GFX11-NEXT: v_add_f64 v[8:9], v[8:9], 1.0
@@ -43209,7 +43445,9 @@ define inreg <60 x half> @bitcast_v15f64_to_v60f16_scalar(<15 x double> inreg %a
; GFX11-NEXT: ; implicit-def: $vgpr52
; GFX11-NEXT: ; implicit-def: $vgpr51
; GFX11-NEXT: ; implicit-def: $vgpr50
-; GFX11-NEXT: s_branch .LBB53_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB53_2
+; GFX11-NEXT: s_branch .LBB53_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -44820,11 +45058,11 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: buffer_store_dword v63, off, s[0:3], s32 offset:64 ; 4-byte Folded Spill
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:60
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32
-; SI-NEXT: buffer_load_dword v48, off, s[0:3], s32 offset:8
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:8
; SI-NEXT: buffer_load_dword v40, off, s[0:3], s32 offset:4
; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:16
; SI-NEXT: buffer_load_dword v42, off, s[0:3], s32 offset:12
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:24
+; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:24
; SI-NEXT: buffer_load_dword v44, off, s[0:3], s32 offset:20
; SI-NEXT: buffer_load_dword v45, off, s[0:3], s32 offset:32
; SI-NEXT: buffer_load_dword v46, off, s[0:3], s32 offset:28
@@ -44838,83 +45076,92 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: buffer_load_dword v59, off, s[0:3], s32 offset:56
; SI-NEXT: s_waitcnt expcnt(3)
; SI-NEXT: buffer_load_dword v60, off, s[0:3], s32 offset:52
-; SI-NEXT: v_cvt_f16_f32_e32 v37, v0
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v2
-; SI-NEXT: v_cvt_f16_f32_e32 v49, v1
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
+; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
; SI-NEXT: v_cvt_f16_f32_e32 v39, v3
-; SI-NEXT: v_cvt_f16_f32_e32 v34, v7
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v49, v2
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v5
-; SI-NEXT: v_cvt_f16_f32_e32 v32, v6
-; SI-NEXT: v_cvt_f16_f32_e32 v36, v9
-; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v26
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v48, v5
+; SI-NEXT: v_cvt_f16_f32_e32 v36, v4
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v4
-; SI-NEXT: v_cvt_f16_f32_e32 v63, v11
-; SI-NEXT: v_cvt_f16_f32_e32 v62, v10
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v29
+; SI-NEXT: v_cvt_f16_f32_e32 v38, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v37, v6
+; SI-NEXT: v_cvt_f16_f32_e32 v32, v9
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v28
+; SI-NEXT: v_cvt_f16_f32_e32 v35, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v34, v11
+; SI-NEXT: v_cvt_f16_f32_e32 v63, v10
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:192 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v18
-; SI-NEXT: v_cvt_f16_f32_e32 v43, v12
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v62, v12
; SI-NEXT: v_cvt_f16_f32_e32 v41, v15
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:184 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f16_f32_e32 v55, v14
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v20
; SI-NEXT: v_cvt_f16_f32_e32 v15, v17
; SI-NEXT: v_cvt_f16_f32_e32 v61, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v19
-; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:200 ; 4-byte Folded Spill
+; SI-NEXT: v_cvt_f16_f32_e32 v53, v18
; SI-NEXT: v_cvt_f16_f32_e32 v17, v21
+; SI-NEXT: v_cvt_f16_f32_e32 v51, v20
; SI-NEXT: v_cvt_f16_f32_e32 v18, v23
; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_cvt_f16_f32_e32 v19, v25
; SI-NEXT: v_cvt_f16_f32_e32 v21, v24
; SI-NEXT: v_cvt_f16_f32_e32 v20, v27
-; SI-NEXT: v_cvt_f16_f32_e32 v53, v26
-; SI-NEXT: v_cvt_f16_f32_e32 v52, v29
-; SI-NEXT: v_cvt_f16_f32_e32 v51, v28
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v12, s16
; SI-NEXT: v_cvt_f16_f32_e32 v1, s19
-; SI-NEXT: v_cvt_f16_f32_e32 v12, s18
+; SI-NEXT: v_cvt_f16_f32_e32 v11, s18
; SI-NEXT: v_cvt_f16_f32_e32 v2, s21
-; SI-NEXT: v_cvt_f16_f32_e32 v11, s20
+; SI-NEXT: v_cvt_f16_f32_e32 v9, s20
; SI-NEXT: v_cvt_f16_f32_e32 v3, s23
; SI-NEXT: v_cvt_f16_f32_e32 v10, s22
; SI-NEXT: v_cvt_f16_f32_e32 v4, s25
-; SI-NEXT: v_cvt_f16_f32_e32 v9, s24
+; SI-NEXT: v_cvt_f16_f32_e32 v8, s24
; SI-NEXT: v_cvt_f16_f32_e32 v5, s27
-; SI-NEXT: v_cvt_f16_f32_e32 v8, s26
+; SI-NEXT: v_cvt_f16_f32_e32 v7, s26
; SI-NEXT: v_cvt_f16_f32_e32 v6, s29
-; SI-NEXT: v_cvt_f16_f32_e32 v7, s28
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v31
-; SI-NEXT: v_cvt_f16_f32_e32 v50, v54
-; SI-NEXT: v_cvt_f16_f32_e32 v48, v48
-; SI-NEXT: v_cvt_f16_f32_e32 v31, v40
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v54
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:188 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v50
+; SI-NEXT: v_cvt_f16_f32_e32 v50, s28
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v40
+; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
+; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v33
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:172 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v42
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f16_f32_e32 v0, v38
+; SI-NEXT: v_cvt_f16_f32_e32 v0, v43
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:164 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v44
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v45
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:156 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v46
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v47
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -44923,260 +45170,240 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v57
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
+; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v58
-; SI-NEXT: v_cvt_f16_f32_e32 v58, s16
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v59
; SI-NEXT: v_cvt_f16_f32_e32 v59, s17
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt expcnt(0)
+; SI-NEXT: s_waitcnt vmcnt(14) expcnt(0)
; SI-NEXT: v_cvt_f16_f32_e32 v0, v60
; SI-NEXT: buffer_store_dword v0, off, s[0:3], s32 offset:128 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:208 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v37, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v21, off, s[0:3], s32 offset:220 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v22, off, s[0:3], s32 offset:228 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v17, off, s[0:3], s32 offset:236 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v61, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v39, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v7, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v8, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v10, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v12, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
-; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v9, off, s[0:3], s32 offset:288 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v11, off, s[0:3], s32 offset:292 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:296 ; 4-byte Folded Spill
; SI-NEXT: s_cbranch_scc0 .LBB55_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
+; SI-NEXT: v_or_b32_e32 v19, v21, v19
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
+; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
+; SI-NEXT: v_or_b32_e32 v18, v22, v18
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
-; SI-NEXT: v_or_b32_e32 v3, v10, v3
-; SI-NEXT: s_waitcnt expcnt(3)
-; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v34
-; SI-NEXT: v_mov_b32_e32 v33, v32
-; SI-NEXT: v_or_b32_e32 v10, v32, v10
-; SI-NEXT: buffer_load_dword v32, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:160 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_or_b32_e32 v13, v43, v13
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
-; SI-NEXT: v_or_b32_e32 v5, v8, v5
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
-; SI-NEXT: buffer_load_dword v39, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
-; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v49
-; SI-NEXT: v_or_b32_e32 v7, v37, v7
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:136 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: v_or_b32_e32 v6, v50, v6
+; SI-NEXT: v_mov_b32_e32 v30, v50
+; SI-NEXT: buffer_load_dword v50, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
+; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v1, 16, v1
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
+; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
-; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
-; SI-NEXT: v_lshlrev_b32_e32 v19, 16, v19
-; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v59
-; SI-NEXT: v_or_b32_e32 v1, v12, v1
-; SI-NEXT: v_or_b32_e32 v2, v11, v2
-; SI-NEXT: v_or_b32_e32 v4, v9, v4
-; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v36
-; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v63
+; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
+; SI-NEXT: v_mov_b32_e32 v52, v12
+; SI-NEXT: v_or_b32_e32 v0, v12, v0
+; SI-NEXT: v_or_b32_e32 v1, v11, v1
+; SI-NEXT: v_or_b32_e32 v2, v9, v2
+; SI-NEXT: v_or_b32_e32 v3, v10, v3
+; SI-NEXT: v_or_b32_e32 v4, v8, v4
+; SI-NEXT: v_or_b32_e32 v5, v7, v5
+; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v39
+; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v48
+; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v38
+; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v32
+; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v34
+; SI-NEXT: v_lshlrev_b32_e32 v13, 16, v13
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v41
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v17
-; SI-NEXT: v_or_b32_e32 v18, v22, v18
-; SI-NEXT: v_or_b32_e32 v19, v21, v19
-; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v20
-; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v52
-; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v50
-; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v48
-; SI-NEXT: v_or_b32_e32 v0, v58, v0
-; SI-NEXT: v_mov_b32_e32 v56, v34
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_or_b32_e32 v8, v49, v8
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_or_b32_e32 v9, v36, v9
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
+; SI-NEXT: v_or_b32_e32 v10, v37, v10
+; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v45, v35
; SI-NEXT: v_or_b32_e32 v11, v35, v11
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
-; SI-NEXT: v_or_b32_e32 v12, v62, v12
+; SI-NEXT: v_or_b32_e32 v12, v63, v12
+; SI-NEXT: v_mov_b32_e32 v43, v62
+; SI-NEXT: v_or_b32_e32 v13, v62, v13
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
; SI-NEXT: v_or_b32_e32 v14, v55, v14
; SI-NEXT: v_or_b32_e32 v15, v61, v15
-; SI-NEXT: v_or_b32_e32 v20, v53, v20
-; SI-NEXT: v_or_b32_e32 v21, v51, v21
-; SI-NEXT: v_or_b32_e32 v22, v30, v22
-; SI-NEXT: v_or_b32_e32 v23, v31, v23
+; SI-NEXT: v_or_b32_e32 v16, v53, v16
+; SI-NEXT: v_or_b32_e32 v17, v51, v17
; SI-NEXT: s_mov_b64 s[4:5], 0
; SI-NEXT: s_waitcnt vmcnt(11)
+; SI-NEXT: v_or_b32_e32 v20, v21, v20
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
+; SI-NEXT: v_or_b32_e32 v21, v22, v21
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
+; SI-NEXT: v_or_b32_e32 v22, v23, v22
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v23
+; SI-NEXT: v_or_b32_e32 v23, v24, v23
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v24, 16, v24
-; SI-NEXT: s_waitcnt vmcnt(10)
; SI-NEXT: v_or_b32_e32 v24, v25, v24
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(10)
-; SI-NEXT: v_or_b32_e32 v17, v32, v17
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v25, 16, v25
; SI-NEXT: v_or_b32_e32 v25, v26, v25
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v16, v43, v16
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v26, 16, v26
; SI-NEXT: v_or_b32_e32 v26, v27, v26
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:148 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v35, v39
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v27, 16, v27
; SI-NEXT: v_or_b32_e32 v27, v28, v27
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:140 ; 4-byte Folded Reload
-; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v37
-; SI-NEXT: v_or_b32_e32 v9, v39, v9
-; SI-NEXT: v_mov_b32_e32 v36, v37
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v28, 16, v28
; SI-NEXT: v_or_b32_e32 v28, v29, v28
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
-; SI-NEXT: v_or_b32_e32 v8, v38, v8
+; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v50
+; SI-NEXT: v_or_b32_e32 v7, v31, v7
+; SI-NEXT: v_mov_b32_e32 v35, v50
+; SI-NEXT: v_mov_b32_e32 v50, v30
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_lshlrev_b32_e32 v29, 16, v29
; SI-NEXT: v_or_b32_e32 v29, v54, v29
-; SI-NEXT: v_mov_b32_e32 v54, v32
; SI-NEXT: s_branch .LBB55_3
; SI-NEXT: .LBB55_2:
-; SI-NEXT: v_mov_b32_e32 v54, v53
-; SI-NEXT: v_mov_b32_e32 v53, v52
-; SI-NEXT: v_mov_b32_e32 v52, v51
-; SI-NEXT: v_mov_b32_e32 v51, v50
-; SI-NEXT: v_mov_b32_e32 v50, v30
-; SI-NEXT: v_mov_b32_e32 v49, v48
-; SI-NEXT: v_mov_b32_e32 v48, v31
+; SI-NEXT: v_mov_b32_e32 v52, v12
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; SI-NEXT: v_mov_b32_e32 v47, v36
-; SI-NEXT: v_mov_b32_e32 v46, v35
-; SI-NEXT: v_mov_b32_e32 v44, v43
-; SI-NEXT: v_mov_b32_e32 v30, v50
-; SI-NEXT: v_mov_b32_e32 v50, v51
-; SI-NEXT: v_mov_b32_e32 v51, v52
-; SI-NEXT: v_mov_b32_e32 v52, v53
-; SI-NEXT: v_mov_b32_e32 v53, v54
-; SI-NEXT: buffer_load_dword v54, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v43, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v36, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v38, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
-; SI-NEXT: v_mov_b32_e32 v57, v39
-; SI-NEXT: v_mov_b32_e32 v56, v34
+; SI-NEXT: v_mov_b32_e32 v45, v35
+; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v35, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: v_mov_b32_e32 v58, v49
+; SI-NEXT: v_mov_b32_e32 v57, v48
+; SI-NEXT: v_mov_b32_e32 v56, v36
+; SI-NEXT: v_mov_b32_e32 v47, v38
+; SI-NEXT: v_mov_b32_e32 v46, v37
; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v44, v34
; SI-NEXT: v_mov_b32_e32 v60, v63
-; SI-NEXT: v_mov_b32_e32 v45, v62
+; SI-NEXT: v_mov_b32_e32 v43, v62
; SI-NEXT: v_mov_b32_e32 v42, v41
; SI-NEXT: v_mov_b32_e32 v40, v55
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: v_mov_b32_e32 v31, v48
-; SI-NEXT: v_mov_b32_e32 v48, v49
; SI-NEXT: .LBB55_3: ; %Flow
; SI-NEXT: v_mov_b32_e32 v32, v33
-; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v37, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v33, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: v_mov_b32_e32 v61, v40
-; SI-NEXT: v_mov_b32_e32 v40, v44
; SI-NEXT: s_cbranch_vccnz .LBB55_5
; SI-NEXT: ; %bb.4: ; %cmp.true
-; SI-NEXT: s_waitcnt expcnt(5)
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt expcnt(4)
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt expcnt(3)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:296 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:292 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v0, v59
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_cvt_f32_f16_e32 v1, v58
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v1, v52
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v50
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v31
; SI-NEXT: v_add_f32_e32 v0, 0x38000000, v0
; SI-NEXT: v_add_f32_e32 v1, 0x38000000, v1
; SI-NEXT: v_cvt_f16_f32_e32 v0, v0
; SI-NEXT: v_cvt_f16_f32_e32 v1, v1
-; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
-; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
+; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v0
; SI-NEXT: v_or_b32_e32 v0, v1, v0
+; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
+; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v58
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v46
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v45
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v46
-; SI-NEXT: v_cvt_f32_f16_e32 v13, v45
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
-; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
-; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
-; SI-NEXT: v_cvt_f32_f16_e32 v14, v40
+; SI-NEXT: v_cvt_f32_f16_e32 v13, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v14, v43
; SI-NEXT: v_mov_b32_e32 v55, v42
; SI-NEXT: v_cvt_f32_f16_e32 v15, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v17, v43
+; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
+; SI-NEXT: v_cvt_f16_f32_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v14, 0x38000000, v14
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
-; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
-; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
-; SI-NEXT: v_cvt_f32_f16_e32 v19, v54
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v53
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v51
-; SI-NEXT: v_cvt_f32_f16_e32 v24, v48
-; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
-; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
-; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
-; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
-; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
-; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
-; SI-NEXT: v_cvt_f32_f16_e32 v25, v31
+; SI-NEXT: v_cvt_f32_f16_e32 v17, v53
+; SI-NEXT: v_cvt_f32_f16_e32 v19, v51
; SI-NEXT: buffer_load_dword v27, off, s[0:3], s32 offset:156 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v28, off, s[0:3], s32 offset:152 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v29, off, s[0:3], s32 offset:144 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v31, off, s[0:3], s32 offset:128 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:276 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v7, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:204 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:260 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:244 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v17, 0x38000000, v17
+; SI-NEXT: v_cvt_f16_f32_e32 v17, v17
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:216 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:200 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:192 ; 4-byte Folded Reload
+; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
+; SI-NEXT: v_cvt_f16_f32_e32 v19, v19
+; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:180 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:176 ; 4-byte Folded Reload
; SI-NEXT: buffer_load_dword v26, off, s[0:3], s32 offset:168 ; 4-byte Folded Reload
-; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
-; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: s_waitcnt vmcnt(14)
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
-; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v2, 0x38000000, v2
; SI-NEXT: v_cvt_f16_f32_e32 v2, v2
@@ -45184,42 +45411,48 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v1, v3, v2
-; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:268 ; 4-byte Folded Reload
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
-; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: buffer_load_dword v2, off, s[0:3], s32 offset:284 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:288 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v27, v27
-; SI-NEXT: s_waitcnt vmcnt(11)
-; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
-; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: s_waitcnt vmcnt(14)
+; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
+; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
+; SI-NEXT: s_waitcnt vmcnt(13)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
-; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: s_waitcnt vmcnt(12)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
-; SI-NEXT: s_waitcnt vmcnt(8)
+; SI-NEXT: s_waitcnt vmcnt(11)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
-; SI-NEXT: s_waitcnt vmcnt(7)
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v7
+; SI-NEXT: s_waitcnt vmcnt(10)
+; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
-; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
-; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
-; SI-NEXT: s_waitcnt vmcnt(6)
-; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
-; SI-NEXT: s_waitcnt vmcnt(5)
-; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
-; SI-NEXT: s_waitcnt vmcnt(3)
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v27, 0x38000000, v27
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
; SI-NEXT: v_cvt_f16_f32_e32 v16, v16
+; SI-NEXT: s_waitcnt vmcnt(9)
+; SI-NEXT: v_cvt_f32_f16_e32 v18, v18
+; SI-NEXT: s_waitcnt vmcnt(7)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
+; SI-NEXT: s_waitcnt vmcnt(5)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
+; SI-NEXT: s_waitcnt vmcnt(4)
+; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v18, 0x38000000, v18
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
+; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
+; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
+; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
+; SI-NEXT: v_cvt_f16_f32_e32 v24, v24
; SI-NEXT: v_cvt_f16_f32_e32 v27, v27
+; SI-NEXT: v_cvt_f32_f16_e32 v31, v31
+; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
+; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: v_add_f32_e32 v31, 0x38000000, v31
; SI-NEXT: v_cvt_f16_f32_e32 v31, v31
; SI-NEXT: s_waitcnt vmcnt(1)
@@ -45232,65 +45465,65 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v2, 16, v2
; SI-NEXT: v_or_b32_e32 v2, v3, v2
-; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v3, off, s[0:3], s32 offset:280 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
; SI-NEXT: v_add_f32_e32 v3, 0x38000000, v3
; SI-NEXT: v_cvt_f16_f32_e32 v3, v3
; SI-NEXT: v_lshlrev_b32_e32 v3, 16, v3
; SI-NEXT: v_or_b32_e32 v3, v4, v3
-; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v4, off, s[0:3], s32 offset:272 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_add_f32_e32 v4, 0x38000000, v4
; SI-NEXT: v_cvt_f16_f32_e32 v4, v4
; SI-NEXT: v_lshlrev_b32_e32 v4, 16, v4
; SI-NEXT: v_or_b32_e32 v4, v5, v4
-; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v5, off, s[0:3], s32 offset:264 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_add_f32_e32 v5, 0x38000000, v5
; SI-NEXT: v_cvt_f16_f32_e32 v5, v5
; SI-NEXT: v_lshlrev_b32_e32 v5, 16, v5
; SI-NEXT: v_or_b32_e32 v5, v6, v5
-; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v6, off, s[0:3], s32 offset:256 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v6, v6
; SI-NEXT: v_add_f32_e32 v6, 0x38000000, v6
; SI-NEXT: v_cvt_f16_f32_e32 v6, v6
; SI-NEXT: v_lshlrev_b32_e32 v6, 16, v6
; SI-NEXT: v_or_b32_e32 v6, v7, v6
-; SI-NEXT: v_cvt_f32_f16_e32 v7, v37
+; SI-NEXT: v_cvt_f32_f16_e32 v7, v35
; SI-NEXT: v_add_f32_e32 v7, 0x38000000, v7
; SI-NEXT: v_cvt_f16_f32_e32 v7, v7
; SI-NEXT: v_lshlrev_b32_e32 v7, 16, v7
; SI-NEXT: v_or_b32_e32 v7, v8, v7
-; SI-NEXT: v_cvt_f32_f16_e32 v8, v57
+; SI-NEXT: v_cvt_f32_f16_e32 v8, v33
; SI-NEXT: v_add_f32_e32 v8, 0x38000000, v8
; SI-NEXT: v_cvt_f16_f32_e32 v8, v8
; SI-NEXT: v_lshlrev_b32_e32 v8, 16, v8
; SI-NEXT: v_or_b32_e32 v8, v9, v8
-; SI-NEXT: v_cvt_f32_f16_e32 v9, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v9, v57
; SI-NEXT: v_add_f32_e32 v9, 0x38000000, v9
; SI-NEXT: v_cvt_f16_f32_e32 v9, v9
; SI-NEXT: v_lshlrev_b32_e32 v9, 16, v9
; SI-NEXT: v_or_b32_e32 v9, v10, v9
-; SI-NEXT: v_cvt_f32_f16_e32 v10, v56
+; SI-NEXT: v_cvt_f32_f16_e32 v10, v47
; SI-NEXT: v_add_f32_e32 v10, 0x38000000, v10
; SI-NEXT: v_cvt_f16_f32_e32 v10, v10
; SI-NEXT: v_lshlrev_b32_e32 v10, 16, v10
; SI-NEXT: v_or_b32_e32 v10, v11, v10
-; SI-NEXT: v_cvt_f32_f16_e32 v11, v47
+; SI-NEXT: v_cvt_f32_f16_e32 v11, v32
; SI-NEXT: v_add_f32_e32 v11, 0x38000000, v11
; SI-NEXT: v_cvt_f16_f32_e32 v11, v11
; SI-NEXT: v_lshlrev_b32_e32 v11, 16, v11
; SI-NEXT: v_or_b32_e32 v11, v12, v11
-; SI-NEXT: v_cvt_f32_f16_e32 v12, v60
+; SI-NEXT: v_cvt_f32_f16_e32 v12, v44
; SI-NEXT: v_add_f32_e32 v12, 0x38000000, v12
; SI-NEXT: v_cvt_f16_f32_e32 v12, v12
; SI-NEXT: v_lshlrev_b32_e32 v12, 16, v12
; SI-NEXT: v_or_b32_e32 v12, v13, v12
-; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v13, off, s[0:3], s32 offset:252 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v13, v13
; SI-NEXT: v_add_f32_e32 v13, 0x38000000, v13
@@ -45302,14 +45535,14 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v14, v14
; SI-NEXT: v_lshlrev_b32_e32 v14, 16, v14
; SI-NEXT: v_or_b32_e32 v14, v15, v14
-; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:236 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v15, off, s[0:3], s32 offset:248 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v15, v15
; SI-NEXT: v_add_f32_e32 v15, 0x38000000, v15
; SI-NEXT: v_cvt_f16_f32_e32 v15, v15
; SI-NEXT: v_lshlrev_b32_e32 v15, 16, v15
; SI-NEXT: v_or_b32_e32 v15, v16, v15
-; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:228 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v16, off, s[0:3], s32 offset:240 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v16, v16
; SI-NEXT: v_add_f32_e32 v16, 0x38000000, v16
@@ -45317,9 +45550,9 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_lshlrev_b32_e32 v16, 16, v16
; SI-NEXT: v_or_b32_e32 v16, v17, v16
; SI-NEXT: v_lshlrev_b32_e32 v17, 16, v18
-; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v18, off, s[0:3], s32 offset:232 ; 4-byte Folded Reload
; SI-NEXT: v_or_b32_e32 v17, v19, v17
-; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:212 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v19, off, s[0:3], s32 offset:224 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v20, v20
; SI-NEXT: v_add_f32_e32 v20, 0x38000000, v20
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
@@ -45329,7 +45562,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v18, v18
; SI-NEXT: v_lshlrev_b32_e32 v18, 16, v18
; SI-NEXT: v_or_b32_e32 v18, v20, v18
-; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:208 ; 4-byte Folded Reload
+; SI-NEXT: buffer_load_dword v20, off, s[0:3], s32 offset:220 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v19, v19
; SI-NEXT: v_add_f32_e32 v19, 0x38000000, v19
@@ -45341,32 +45574,39 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; SI-NEXT: v_cvt_f16_f32_e32 v20, v20
; SI-NEXT: v_or_b32_e32 v19, v20, v19
; SI-NEXT: v_lshlrev_b32_e32 v20, 16, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v21, v52
+; SI-NEXT: buffer_load_dword v21, off, s[0:3], s32 offset:196 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_or_b32_e32 v20, v22, v20
-; SI-NEXT: v_cvt_f32_f16_e32 v22, v50
+; SI-NEXT: buffer_load_dword v22, off, s[0:3], s32 offset:188 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v21, v21
; SI-NEXT: v_add_f32_e32 v21, 0x38000000, v21
; SI-NEXT: v_cvt_f16_f32_e32 v21, v21
-; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
-; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v21, 16, v21
; SI-NEXT: v_or_b32_e32 v21, v23, v21
-; SI-NEXT: v_cvt_f32_f16_e32 v23, v30
+; SI-NEXT: buffer_load_dword v23, off, s[0:3], s32 offset:184 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: v_cvt_f32_f16_e32 v22, v22
+; SI-NEXT: v_add_f32_e32 v22, 0x38000000, v22
+; SI-NEXT: v_cvt_f16_f32_e32 v22, v22
; SI-NEXT: v_lshlrev_b32_e32 v22, 16, v22
-; SI-NEXT: buffer_load_dword v30, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
+; SI-NEXT: s_waitcnt vmcnt(0)
+; SI-NEXT: v_cvt_f32_f16_e32 v23, v23
; SI-NEXT: v_add_f32_e32 v23, 0x38000000, v23
; SI-NEXT: v_cvt_f16_f32_e32 v23, v23
; SI-NEXT: v_or_b32_e32 v22, v23, v22
; SI-NEXT: v_lshlrev_b32_e32 v23, 16, v24
; SI-NEXT: buffer_load_dword v24, off, s[0:3], s32 offset:172 ; 4-byte Folded Reload
+; SI-NEXT: v_cvt_f32_f16_e32 v25, v25
+; SI-NEXT: v_add_f32_e32 v25, 0x38000000, v25
+; SI-NEXT: v_cvt_f16_f32_e32 v25, v25
; SI-NEXT: v_or_b32_e32 v23, v25, v23
; SI-NEXT: buffer_load_dword v25, off, s[0:3], s32 offset:164 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v26, v26
; SI-NEXT: v_add_f32_e32 v26, 0x38000000, v26
; SI-NEXT: v_cvt_f16_f32_e32 v26, v26
-; SI-NEXT: s_waitcnt vmcnt(2)
-; SI-NEXT: v_cvt_f32_f16_e32 v30, v30
-; SI-NEXT: v_add_f32_e32 v30, 0x38000000, v30
-; SI-NEXT: v_cvt_f16_f32_e32 v30, v30
; SI-NEXT: s_waitcnt vmcnt(1)
; SI-NEXT: v_cvt_f32_f16_e32 v24, v24
; SI-NEXT: v_add_f32_e32 v24, 0x38000000, v24
@@ -45451,6 +45691,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
; VI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_mov_b32_e32 v32, v15
; VI-NEXT: v_mov_b32_e32 v33, v14
; VI-NEXT: v_mov_b32_e32 v34, v13
@@ -45467,7 +45708,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; VI-NEXT: v_mov_b32_e32 v53, v2
; VI-NEXT: v_mov_b32_e32 v54, v1
; VI-NEXT: v_mov_b32_e32 v55, v0
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB55_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: v_mov_b32_e32 v0, 16
@@ -45671,11 +45912,28 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB55_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; VI-NEXT: s_branch .LBB55_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB55_2
+; VI-NEXT: s_branch .LBB55_3
;
; GFX9-LABEL: bitcast_v60f16_to_v15f64_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
+; GFX9-NEXT: s_lshr_b32 s40, s29, 16
+; GFX9-NEXT: s_lshr_b32 s41, s28, 16
+; GFX9-NEXT: s_lshr_b32 s42, s27, 16
+; GFX9-NEXT: s_lshr_b32 s43, s26, 16
+; GFX9-NEXT: s_lshr_b32 s15, s25, 16
+; GFX9-NEXT: s_lshr_b32 s14, s24, 16
+; GFX9-NEXT: s_lshr_b32 s13, s23, 16
+; GFX9-NEXT: s_lshr_b32 s12, s22, 16
+; GFX9-NEXT: s_lshr_b32 s11, s21, 16
+; GFX9-NEXT: s_lshr_b32 s10, s20, 16
+; GFX9-NEXT: s_lshr_b32 s9, s19, 16
+; GFX9-NEXT: s_lshr_b32 s8, s18, 16
+; GFX9-NEXT: s_lshr_b32 s7, s17, 16
+; GFX9-NEXT: s_lshr_b32 s6, s16, 16
; GFX9-NEXT: v_mov_b32_e32 v32, v15
; GFX9-NEXT: v_mov_b32_e32 v33, v14
; GFX9-NEXT: v_mov_b32_e32 v34, v13
@@ -45692,21 +45950,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX9-NEXT: v_mov_b32_e32 v53, v2
; GFX9-NEXT: v_mov_b32_e32 v54, v1
; GFX9-NEXT: v_mov_b32_e32 v55, v0
-; GFX9-NEXT: v_cmp_ne_u32_e32 vcc, 0, v16
-; GFX9-NEXT: s_lshr_b32 s40, s29, 16
-; GFX9-NEXT: s_lshr_b32 s41, s28, 16
-; GFX9-NEXT: s_lshr_b32 s42, s27, 16
-; GFX9-NEXT: s_lshr_b32 s43, s26, 16
-; GFX9-NEXT: s_lshr_b32 s15, s25, 16
-; GFX9-NEXT: s_lshr_b32 s14, s24, 16
-; GFX9-NEXT: s_lshr_b32 s13, s23, 16
-; GFX9-NEXT: s_lshr_b32 s12, s22, 16
-; GFX9-NEXT: s_lshr_b32 s11, s21, 16
-; GFX9-NEXT: s_lshr_b32 s10, s20, 16
-; GFX9-NEXT: s_lshr_b32 s9, s19, 16
-; GFX9-NEXT: s_lshr_b32 s8, s18, 16
-; GFX9-NEXT: s_lshr_b32 s7, s17, 16
-; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: buffer_store_dword v40, off, s[0:3], s32 offset:60 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v41, off, s[0:3], s32 offset:56 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v42, off, s[0:3], s32 offset:52 ; 4-byte Folded Spill
@@ -45727,7 +45971,6 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX9-NEXT: v_lshrrev_b32_e32 v41, 16, v33
; GFX9-NEXT: v_lshrrev_b32_e32 v42, 16, v34
; GFX9-NEXT: v_lshrrev_b32_e32 v43, 16, v35
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: s_pack_ll_b32_b16 s6, s16, s6
; GFX9-NEXT: s_pack_ll_b32_b16 s7, s17, s7
; GFX9-NEXT: s_pack_ll_b32_b16 s8, s18, s8
@@ -45742,6 +45985,7 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX9-NEXT: s_pack_ll_b32_b16 s17, s27, s42
; GFX9-NEXT: s_pack_ll_b32_b16 s18, s28, s41
; GFX9-NEXT: s_pack_ll_b32_b16 s19, s29, s40
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v44, 16, v36
; GFX9-NEXT: v_lshrrev_b32_e32 v45, 16, v37
; GFX9-NEXT: v_lshrrev_b32_e32 v46, 16, v38
@@ -45889,7 +46133,9 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX9-NEXT: s_branch .LBB55_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB55_2
+; GFX9-NEXT: s_branch .LBB55_3
;
; GFX11-TRUE16-LABEL: bitcast_v60f16_to_v15f64_scalar:
; GFX11-TRUE16: ; %bb.0:
@@ -45934,41 +46180,41 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-TRUE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-TRUE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-TRUE16-NEXT: s_mov_b32 s15, 0
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-TRUE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-TRUE16-NEXT: s_mov_b32 s18, -1
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -45983,17 +46229,16 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-TRUE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-TRUE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -46007,24 +46252,24 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-TRUE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-TRUE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-TRUE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -46041,7 +46286,9 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB55_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-TRUE16-NEXT: s_branch .LBB55_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-TRUE16-NEXT: s_branch .LBB55_3
;
; GFX11-FAKE16-LABEL: bitcast_v60f16_to_v15f64_scalar:
; GFX11-FAKE16: ; %bb.0:
@@ -46074,41 +46321,41 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-FAKE16-NEXT: s_lshr_b32 s40, s29, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s41, s28, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s42, s27, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s26, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s25, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s24, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s23, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s22, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s21, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s20, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s19, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s15, s26, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s14, s25, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s13, s24, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s23, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s22, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s21, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s20, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s19, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s18, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s16, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s43, s3, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s44, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s45, s1, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s46, s0, 16
-; GFX11-FAKE16-NEXT: s_mov_b32 s15, 0
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s46
+; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s0, 16
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s45
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s0, s4
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s44
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s43
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s4, s16, s4
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s17, s5
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s18, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s19, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s20, s8
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s21, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s22, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s23, s11
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s24, s12
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s25, s13
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s26, s14
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s27, s42
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s28, s41
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s18, s29, s40
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s5, s16, s5
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s17, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s18, s7
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s19, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s20, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s21, s10
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s22, s11
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s23, s12
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s13, s24, s13
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s14, s25, s14
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s15, s26, s15
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s27, s42
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s16, s28, s41
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s17, s29, s40
+; GFX11-FAKE16-NEXT: s_mov_b32 s18, -1
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB55_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
@@ -46123,17 +46370,16 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s1
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s4 :: v_dual_mov_b32 v5, s5
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s6 :: v_dual_mov_b32 v7, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s8 :: v_dual_mov_b32 v9, s9
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s10 :: v_dual_mov_b32 v11, s11
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s12 :: v_dual_mov_b32 v13, s13
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s14 :: v_dual_mov_b32 v15, s16
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s17 :: v_dual_mov_b32 v17, s18
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s15
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB55_3
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v4, s5 :: v_dual_mov_b32 v5, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v6, s7 :: v_dual_mov_b32 v7, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v8, s9 :: v_dual_mov_b32 v9, s10
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v10, s11 :: v_dual_mov_b32 v11, s12
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v12, s13 :: v_dual_mov_b32 v13, s14
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v14, s15 :: v_dual_mov_b32 v15, s0
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v16, s16 :: v_dual_mov_b32 v17, s17
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB55_3
; GFX11-FAKE16-NEXT: .LBB55_2: ; %cmp.true
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v18, v51, 16, v71
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v19, v50, 16, v70
@@ -46147,24 +46393,24 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v27, v34, 16, v54
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v28, v33, 16, v53
; GFX11-FAKE16-NEXT: v_lshl_or_b32 v29, v32, 16, v52
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v0, 0x200, s4 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v3, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s4 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s5 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s6 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s7 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s8 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s9 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s11 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s12 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s13 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s14 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s16 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s17 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s18 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v4, 0x200, s5 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v5, 0x200, s6 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v6, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v7, 0x200, s8 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v8, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v9, 0x200, s10 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v10, 0x200, s11 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, s12 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v12, 0x200, s13 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v13, 0x200, s14 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v14, 0x200, s15 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v15, 0x200, s0 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v16, 0x200, s16 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v17, 0x200, s17 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v18, 0x200, v18 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v19, 0x200, v19 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v20, 0x200, v20 op_sel_hi:[0,1]
@@ -46181,7 +46427,9 @@ define inreg <15 x double> @bitcast_v60f16_to_v15f64_scalar(<60 x half> inreg %a
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB55_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11_vgpr12_vgpr13_vgpr14_vgpr15_vgpr16_vgpr17_vgpr18_vgpr19_vgpr20_vgpr21_vgpr22_vgpr23_vgpr24_vgpr25_vgpr26_vgpr27_vgpr28_vgpr29_vgpr30_vgpr31
-; GFX11-FAKE16-NEXT: s_branch .LBB55_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s18
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB55_2
+; GFX11-FAKE16-NEXT: s_branch .LBB55_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -47867,9 +48115,10 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; SI-NEXT: buffer_store_dword v29, off, s[0:3], s32 offset:416 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v30, off, s[0:3], s32 offset:420 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt vmcnt(14)
-; SI-NEXT: buffer_store_dword v35, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
+; SI-NEXT: buffer_store_dword v36, off, s[0:3], s32 offset:424 ; 4-byte Folded Spill
; SI-NEXT: v_cmp_ne_u32_e32 vcc, 0, v49
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v31, v1
@@ -47917,86 +48166,87 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:176 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s22
-; SI-NEXT: v_cvt_f32_f16_e32 v45, v35
+; SI-NEXT: v_mov_b32_e32 v36, v35
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:264 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v8
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:148 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s23
-; SI-NEXT: v_mov_b32_e32 v35, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v45, v35
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:284 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v9
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s24
-; SI-NEXT: v_cvt_f32_f16_e32 v40, v34
+; SI-NEXT: v_mov_b32_e32 v35, v34
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:256 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v10
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s25
-; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_cvt_f32_f16_e32 v40, v34
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:276 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v11
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s26
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v33
+; SI-NEXT: v_mov_b32_e32 v34, v33
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:248 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v12
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s27
-; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_cvt_f32_f16_e32 v43, v33
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:268 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v13
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s28
-; SI-NEXT: v_cvt_f32_f16_e32 v54, v32
+; SI-NEXT: v_mov_b32_e32 v33, v32
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:240 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v14
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s29
-; SI-NEXT: v_mov_b32_e32 v32, v50
+; SI-NEXT: v_cvt_f32_f16_e32 v54, v32
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:260 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v15
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, v28
-; SI-NEXT: v_cvt_f32_f16_e32 v41, v50
+; SI-NEXT: v_mov_b32_e32 v32, v50
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:232 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v16
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:204 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, v30
-; SI-NEXT: v_cvt_f32_f16_e32 v52, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v41, v50
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:252 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v17
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:196 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, v59
-; SI-NEXT: v_cvt_f32_f16_e32 v55, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v52, v63
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:224 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v18
+; SI-NEXT: v_cvt_f32_f16_e32 v55, v62
; SI-NEXT: v_cvt_f32_f16_e32 v50, v61
; SI-NEXT: v_cvt_f32_f16_e32 v53, v60
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:244 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v31, v19
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:180 ; 4-byte Folded Spill
; SI-NEXT: v_cvt_f32_f16_e32 v51, v58
; SI-NEXT: buffer_store_dword v31, off, s[0:3], s32 offset:216 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
@@ -48029,27 +48279,27 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr49
; SI-NEXT: s_waitcnt expcnt(0)
-; SI-NEXT: v_mov_b32_e32 v35, v34
+; SI-NEXT: v_mov_b32_e32 v36, v35
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr49
-; SI-NEXT: v_mov_b32_e32 v34, v33
+; SI-NEXT: v_mov_b32_e32 v35, v34
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr49
-; SI-NEXT: v_mov_b32_e32 v33, v32
+; SI-NEXT: v_mov_b32_e32 v34, v33
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr49
-; SI-NEXT: v_mov_b32_e32 v32, v50
+; SI-NEXT: v_mov_b32_e32 v33, v32
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr49
; SI-NEXT: ; kill: killed $vgpr49
-; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: v_mov_b32_e32 v32, v50
; SI-NEXT: ; implicit-def: $vgpr31
; SI-NEXT: ; kill: killed $vgpr31
; SI-NEXT: ; implicit-def: $vgpr49
@@ -48151,6 +48401,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; SI-NEXT: v_add_i32_e32 v32, vcc, 3, v33
; SI-NEXT: v_add_i32_e32 v33, vcc, 3, v34
; SI-NEXT: v_add_i32_e32 v34, vcc, 3, v35
+; SI-NEXT: v_add_i32_e32 v35, vcc, 3, v36
; SI-NEXT: s_add_i32 s16, s16, 3
; SI-NEXT: v_cvt_f32_f16_e32 v49, s16
; SI-NEXT: s_add_i32 s17, s17, 3
@@ -48193,50 +48444,49 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s23
; SI-NEXT: v_add_i32_e32 v63, vcc, 3, v63
-; SI-NEXT: v_add_i32_e32 v36, vcc, 3, v36
+; SI-NEXT: v_add_i32_e32 v37, vcc, 3, v37
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:168 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s24
-; SI-NEXT: v_add_i32_e32 v37, vcc, 3, v37
; SI-NEXT: v_add_i32_e32 v38, vcc, 3, v38
+; SI-NEXT: v_add_i32_e32 v39, vcc, 3, v39
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:140 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s25
-; SI-NEXT: v_add_i32_e32 v39, vcc, 3, v39
; SI-NEXT: v_add_i32_e32 v48, vcc, 3, v48
+; SI-NEXT: v_cvt_f32_f16_e32 v44, v48
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:160 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s26
-; SI-NEXT: v_cvt_f32_f16_e32 v44, v48
; SI-NEXT: v_cvt_f32_f16_e32 v57, v39
; SI-NEXT: v_cvt_f32_f16_e32 v42, v38
+; SI-NEXT: v_cvt_f32_f16_e32 v47, v37
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s27
-; SI-NEXT: v_cvt_f32_f16_e32 v47, v37
-; SI-NEXT: v_cvt_f32_f16_e32 v40, v36
+; SI-NEXT: v_cvt_f32_f16_e32 v45, v35
; SI-NEXT: v_cvt_f32_f16_e32 v54, v34
+; SI-NEXT: v_cvt_f32_f16_e32 v43, v33
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:152 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s28
-; SI-NEXT: v_cvt_f32_f16_e32 v43, v33
; SI-NEXT: v_cvt_f32_f16_e32 v52, v32
; SI-NEXT: v_cvt_f32_f16_e32 v41, v31
-; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
-; SI-NEXT: s_waitcnt vmcnt(13)
-; SI-NEXT: v_add_i32_e32 v35, vcc, 3, v1
+; SI-NEXT: v_cvt_f32_f16_e32 v50, v63
+; SI-NEXT: s_waitcnt vmcnt(12)
+; SI-NEXT: v_add_i32_e32 v36, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:420 ; 4-byte Folded Reload
+; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:132 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, s29
-; SI-NEXT: v_cvt_f32_f16_e32 v45, v35
-; SI-NEXT: v_cvt_f32_f16_e32 v50, v63
+; SI-NEXT: v_cvt_f32_f16_e32 v40, v36
; SI-NEXT: v_cvt_f32_f16_e32 v55, v62
+; SI-NEXT: v_cvt_f32_f16_e32 v53, v60
; SI-NEXT: buffer_store_dword v49, off, s[0:3], s32 offset:144 ; 4-byte Folded Spill
; SI-NEXT: s_waitcnt expcnt(0)
; SI-NEXT: v_cvt_f32_f16_e32 v49, v61
-; SI-NEXT: v_cvt_f32_f16_e32 v53, v60
; SI-NEXT: v_cvt_f32_f16_e32 v51, v58
-; SI-NEXT: s_waitcnt vmcnt(1)
+; SI-NEXT: s_waitcnt vmcnt(2)
; SI-NEXT: v_add_i32_e32 v30, vcc, 3, v1
; SI-NEXT: buffer_load_dword v1, off, s[0:3], s32 offset:416 ; 4-byte Folded Reload
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -48746,11 +48996,12 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v14
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v12
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v9
@@ -48763,10 +49014,13 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; VI-NEXT: v_lshrrev_b32_e32 v16, 16, v2
; VI-NEXT: v_lshrrev_b32_e32 v31, 16, v1
; VI-NEXT: v_lshrrev_b32_e32 v30, 16, v0
-; VI-NEXT: s_cbranch_scc0 .LBB57_4
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_3
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s16, s16, 3
; VI-NEXT: s_add_i32 s43, s43, 3
; VI-NEXT: s_add_i32 s17, s17, 3
@@ -48827,7 +49081,7 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; VI-NEXT: v_add_u32_e32 v28, vcc, 3, v28
; VI-NEXT: v_add_u32_e32 v15, vcc, 3, v15
; VI-NEXT: v_add_u32_e32 v29, vcc, 3, v29
-; VI-NEXT: .LBB57_3: ; %end
+; VI-NEXT: .LBB57_4: ; %end
; VI-NEXT: v_lshlrev_b32_e32 v30, 16, v30
; VI-NEXT: s_and_b32 s4, 0xffff, s16
; VI-NEXT: s_lshl_b32 s5, s43, 16
@@ -48919,8 +49173,6 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; VI-NEXT: v_mov_b32_e32 v14, v30
; VI-NEXT: v_mov_b32_e32 v15, v31
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_4:
-; VI-NEXT: s_branch .LBB57_2
;
; GFX9-LABEL: bitcast_v60i16_to_v60f16_scalar:
; GFX9: ; %bb.0:
@@ -48940,11 +49192,12 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v12
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v9
@@ -48969,10 +49222,13 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s29, s43
; GFX9-NEXT: v_pk_add_u16 v30, s4, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_pack_ll_b32_b16 s4, s28, s42
@@ -49080,8 +49336,6 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v15
; GFX9-NEXT: s_branch .LBB57_5
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v30, s29
; GFX9-NEXT: v_mov_b32_e32 v31, s28
@@ -49249,13 +49503,16 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-TRUE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
@@ -49359,8 +49616,6 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
; GFX11-TRUE16-NEXT: s_branch .LBB57_5
-; GFX11-TRUE16-NEXT: .LBB57_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB57_2
; GFX11-TRUE16-NEXT: .LBB57_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
@@ -49480,19 +49735,22 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB57_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-FAKE16-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
@@ -49529,10 +49787,10 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_u16 v11, v11, 3 op_sel_hi:[1,0]
@@ -49559,12 +49817,12 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_u16 v31, s12, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v32, s11, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v33, s10, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v49, s9, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v49, s8, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v48, s0, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v39, s1, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v38, s2, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_pk_add_u16 v37, s3, 3 op_sel_hi:[1,0]
-; GFX11-FAKE16-NEXT: v_pk_add_u16 v36, s7, 3 op_sel_hi:[1,0]
+; GFX11-FAKE16-NEXT: v_pk_add_u16 v36, s6, 3 op_sel_hi:[1,0]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v48
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v39
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v38
@@ -49596,8 +49854,6 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
; GFX11-FAKE16-NEXT: s_branch .LBB57_5
-; GFX11-FAKE16-NEXT: .LBB57_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB57_2
; GFX11-FAKE16-NEXT: .LBB57_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
@@ -49614,8 +49870,8 @@ define inreg <60 x half> @bitcast_v60i16_to_v60f16_scalar(<60 x i16> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s15 :: v_dual_mov_b32 v65, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s13 :: v_dual_mov_b32 v67, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s11 :: v_dual_mov_b32 v69, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s9 :: v_dual_mov_b32 v71, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s6 :: v_dual_mov_b32 v81, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s8 :: v_dual_mov_b32 v71, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s9 :: v_dual_mov_b32 v81, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v82, s4 :: v_dual_mov_b32 v83, s5
; GFX11-FAKE16-NEXT: .LBB57_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
@@ -51066,13 +51322,11 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v58, s25
; SI-NEXT: v_cvt_f16_f32_e32 v57, s29
; SI-NEXT: s_and_b64 s[4:5], vcc, exec
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB59_2
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_mov_b64 s[4:5], 0
-; SI-NEXT: s_branch .LBB59_3
-; SI-NEXT: .LBB59_2:
-; SI-NEXT: s_mov_b64 s[4:5], -1
-; SI-NEXT: .LBB59_3: ; %Flow
+; SI-NEXT: .LBB59_2: ; %Flow
; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; SI-NEXT: v_mov_b32_e32 v61, v14
; SI-NEXT: v_mov_b32_e32 v63, v15
@@ -51084,8 +51338,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_mov_b32_e32 v8, v5
; SI-NEXT: v_mov_b32_e32 v5, v42
; SI-NEXT: v_mov_b32_e32 v42, v1
-; SI-NEXT: s_cbranch_vccnz .LBB59_5
-; SI-NEXT: ; %bb.4: ; %cmp.true
+; SI-NEXT: s_cbranch_vccnz .LBB59_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: buffer_load_dword v14, off, s[0:3], s32 offset:132 ; 4-byte Folded Reload
; SI-NEXT: v_cvt_f32_f16_e32 v2, v2
; SI-NEXT: v_cvt_f32_f16_e32 v1, v62
@@ -51348,7 +51602,7 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; SI-NEXT: v_alignbit_b32 v19, v3, v19, 16
; SI-NEXT: v_alignbit_b32 v36, v62, v14, 16
; SI-NEXT: buffer_store_dword v1, off, s[0:3], s32 offset:136 ; 4-byte Folded Spill
-; SI-NEXT: .LBB59_5: ; %end
+; SI-NEXT: .LBB59_4: ; %end
; SI-NEXT: v_and_b32_e32 v39, 0xffff, v47
; SI-NEXT: v_lshlrev_b32_e32 v50, 16, v60
; SI-NEXT: v_or_b32_e32 v39, v39, v50
@@ -51562,11 +51816,12 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; VI-NEXT: s_lshr_b32 s41, s18, 16
; VI-NEXT: s_lshr_b32 s42, s17, 16
; VI-NEXT: s_lshr_b32 s43, s16, 16
+; VI-NEXT: s_and_b64 s[4:5], vcc, exec
; VI-NEXT: v_lshrrev_b32_e32 v29, 16, v15
; VI-NEXT: v_lshrrev_b32_e32 v28, 16, v14
; VI-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; VI-NEXT: v_lshrrev_b32_e32 v26, 16, v12
-; VI-NEXT: s_and_b64 s[4:5], vcc, exec
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: v_lshrrev_b32_e32 v25, 16, v11
; VI-NEXT: v_lshrrev_b32_e32 v24, 16, v10
; VI-NEXT: v_lshrrev_b32_e32 v23, 16, v9
@@ -51591,10 +51846,13 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; VI-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; VI-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
-; VI-NEXT: s_cbranch_scc0 .LBB59_3
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_4
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_mov_b32_e32 v54, 0x200
; VI-NEXT: v_add_f16_e32 v32, s16, v54
; VI-NEXT: v_add_f16_e32 v59, s43, v54
@@ -51657,8 +51915,6 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; VI-NEXT: v_add_f16_e32 v15, 0x200, v15
; VI-NEXT: v_add_f16_e32 v29, 0x200, v29
; VI-NEXT: s_branch .LBB59_5
-; VI-NEXT: .LBB59_3:
-; VI-NEXT: s_branch .LBB59_2
; VI-NEXT: .LBB59_4:
; VI-NEXT: v_mov_b32_e32 v54, s6
; VI-NEXT: v_mov_b32_e32 v53, s29
@@ -51798,11 +52054,12 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: s_lshr_b32 s8, s18, 16
; GFX9-NEXT: s_lshr_b32 s7, s17, 16
; GFX9-NEXT: s_lshr_b32 s6, s16, 16
+; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v15
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v27, 16, v13
; GFX9-NEXT: v_lshrrev_b32_e32 v26, 16, v12
-; GFX9-NEXT: s_and_b64 s[4:5], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: v_lshrrev_b32_e32 v55, 16, v11
; GFX9-NEXT: v_lshrrev_b32_e32 v54, 16, v10
; GFX9-NEXT: v_lshrrev_b32_e32 v25, 16, v9
@@ -51827,10 +52084,13 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: buffer_store_dword v57, off, s[0:3], s32 offset:8 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v58, off, s[0:3], s32 offset:4 ; 4-byte Folded Spill
; GFX9-NEXT: buffer_store_dword v59, off, s[0:3], s32 ; 4-byte Folded Spill
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_and_b32_e32 v15, 0xffff, v15
; GFX9-NEXT: v_and_b32_e32 v14, 0xffff, v14
; GFX9-NEXT: v_and_b32_e32 v13, 0xffff, v13
@@ -51940,8 +52200,6 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX9-NEXT: v_lshrrev_b32_e32 v28, 16, v14
; GFX9-NEXT: v_lshrrev_b32_e32 v29, 16, v15
; GFX9-NEXT: s_branch .LBB59_5
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v30, s29
; GFX9-NEXT: v_mov_b32_e32 v31, s28
@@ -52109,13 +52367,16 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-TRUE16-NEXT: s_lshr_b32 s45, s0, 16
+; GFX11-TRUE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s46, -1
+; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-TRUE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-TRUE16-NEXT: s_mov_b32 s46, 0
-; GFX11-TRUE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-TRUE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-TRUE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-TRUE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-TRUE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-TRUE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-TRUE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX11-TRUE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-TRUE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
@@ -52219,8 +52480,6 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
; GFX11-TRUE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
; GFX11-TRUE16-NEXT: s_branch .LBB59_5
-; GFX11-TRUE16-NEXT: .LBB59_3:
-; GFX11-TRUE16-NEXT: s_branch .LBB59_2
; GFX11-TRUE16-NEXT: .LBB59_4:
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-TRUE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
@@ -52340,19 +52599,22 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_lshr_b32 s12, s20, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s11, s19, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s10, s18, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s17, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s16, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s3, 16
-; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s2, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s8, s17, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s6, s16, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s9, s3, 16
+; GFX11-FAKE16-NEXT: s_lshr_b32 s7, s2, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s4, s1, 16
; GFX11-FAKE16-NEXT: s_lshr_b32 s5, s0, 16
+; GFX11-FAKE16-NEXT: s_and_b32 s46, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s46, -1
+; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-FAKE16-NEXT: ; %bb.1: ; %cmp.false
; GFX11-FAKE16-NEXT: s_mov_b32 s46, 0
-; GFX11-FAKE16-NEXT: s_and_b32 s47, vcc_lo, exec_lo
-; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-FAKE16-NEXT: ; %bb.1: ; %Flow
+; GFX11-FAKE16-NEXT: .LBB59_2: ; %Flow
+; GFX11-FAKE16-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s46
; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-FAKE16-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-FAKE16-NEXT: ; %bb.3: ; %cmp.true
; GFX11-FAKE16-NEXT: v_and_b32_e32 v11, 0xffff, v11
; GFX11-FAKE16-NEXT: v_and_b32_e32 v10, 0xffff, v10
; GFX11-FAKE16-NEXT: v_and_b32_e32 v9, 0xffff, v9
@@ -52389,10 +52651,10 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s12, s20, s12
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s11, s19, s11
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s10, s18, s10
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s9, s17, s9
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s7, s16, s7
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s6
-; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s8, s17, s8
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s6, s16, s6
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s3, s3, s9
+; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s2, s2, s7
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s0, s0, s5
; GFX11-FAKE16-NEXT: s_pack_ll_b32_b16 s1, s1, s4
; GFX11-FAKE16-NEXT: v_pk_add_f16 v11, 0x200, v11 op_sel_hi:[0,1]
@@ -52419,12 +52681,12 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_pk_add_f16 v31, 0x200, s12 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v32, 0x200, s11 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v33, 0x200, s10 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v49, 0x200, s9 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v49, 0x200, s8 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v48, 0x200, s0 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v39, 0x200, s1 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v38, 0x200, s2 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_pk_add_f16 v37, 0x200, s3 op_sel_hi:[0,1]
-; GFX11-FAKE16-NEXT: v_pk_add_f16 v36, 0x200, s7 op_sel_hi:[0,1]
+; GFX11-FAKE16-NEXT: v_pk_add_f16 v36, 0x200, s6 op_sel_hi:[0,1]
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v83, 16, v48
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v82, 16, v39
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v81, 16, v38
@@ -52456,8 +52718,6 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v28, 16, v10
; GFX11-FAKE16-NEXT: v_lshrrev_b32_e32 v29, 16, v11
; GFX11-FAKE16-NEXT: s_branch .LBB59_5
-; GFX11-FAKE16-NEXT: .LBB59_3:
-; GFX11-FAKE16-NEXT: s_branch .LBB59_2
; GFX11-FAKE16-NEXT: .LBB59_4:
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v15, s29 :: v_dual_mov_b32 v16, s28
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v17, s27 :: v_dual_mov_b32 v12, s26
@@ -52474,8 +52734,8 @@ define inreg <60 x i16> @bitcast_v60f16_to_v60i16_scalar(<60 x half> inreg %a, i
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v64, s15 :: v_dual_mov_b32 v65, s14
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v66, s13 :: v_dual_mov_b32 v67, s12
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v68, s11 :: v_dual_mov_b32 v69, s10
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s9 :: v_dual_mov_b32 v71, s7
-; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s6 :: v_dual_mov_b32 v81, s8
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v70, s8 :: v_dual_mov_b32 v71, s6
+; GFX11-FAKE16-NEXT: v_dual_mov_b32 v80, s9 :: v_dual_mov_b32 v81, s7
; GFX11-FAKE16-NEXT: v_dual_mov_b32 v82, s4 :: v_dual_mov_b32 v83, s5
; GFX11-FAKE16-NEXT: .LBB59_5: ; %end
; GFX11-FAKE16-NEXT: v_and_b32_e32 v49, 0xffff, v49
diff --git a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
index e5245f7..62a8b97 100644
--- a/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/amdgcn.bitcast.96bit.ll
@@ -89,79 +89,86 @@ define inreg <3 x float> @bitcast_v3i32_to_v3f32_scalar(<3 x i32> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB1_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB1_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB1_3
-; SI-NEXT: .LBB1_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB1_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB1_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: s_add_i32 s18, s18, 3
; SI-NEXT: s_add_i32 s17, s17, 3
; SI-NEXT: s_add_i32 s16, s16, 3
-; SI-NEXT: .LBB1_3: ; %end
+; SI-NEXT: .LBB1_4: ; %end
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
; SI-NEXT: v_mov_b32_e32 v2, s18
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB1_4:
-; SI-NEXT: s_branch .LBB1_2
;
; VI-LABEL: bitcast_v3i32_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB1_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB1_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB1_3
-; VI-NEXT: .LBB1_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB1_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB1_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB1_3: ; %end
+; VI-NEXT: .LBB1_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB1_4:
-; VI-NEXT: s_branch .LBB1_2
;
; GFX9-LABEL: bitcast_v3i32_to_v3f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB1_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB1_3
-; GFX9-NEXT: .LBB1_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB1_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB1_3: ; %end
+; GFX9-NEXT: .LBB1_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB1_4:
-; GFX9-NEXT: s_branch .LBB1_2
;
; GFX11-LABEL: bitcast_v3i32_to_v3f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB1_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB1_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB1_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB1_3
-; GFX11-NEXT: .LBB1_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB1_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB1_3: ; %end
+; GFX11-NEXT: .LBB1_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB1_4:
-; GFX11-NEXT: s_branch .LBB1_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -261,16 +268,18 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
-; SI-NEXT: s_cbranch_scc0 .LBB3_3
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB3_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB3_4
-; SI-NEXT: .LBB3_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB3_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB3_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_add_f32_e64 v2, s18, 1.0
; SI-NEXT: v_add_f32_e64 v1, s17, 1.0
; SI-NEXT: v_add_f32_e64 v0, s16, 1.0
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB3_3:
-; SI-NEXT: s_branch .LBB3_2
; SI-NEXT: .LBB3_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v1, s17
@@ -281,16 +290,18 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB3_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB3_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB3_4
-; VI-NEXT: .LBB3_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB3_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB3_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB3_3:
-; VI-NEXT: s_branch .LBB3_2
; VI-NEXT: .LBB3_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -301,16 +312,18 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB3_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB3_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB3_4
-; GFX9-NEXT: .LBB3_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB3_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB3_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB3_3:
-; GFX9-NEXT: s_branch .LBB3_2
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -321,18 +334,19 @@ define inreg <3 x i32> @bitcast_v3f32_to_v3i32_scalar(<3 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB3_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB3_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB3_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB3_4
-; GFX11-NEXT: .LBB3_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB3_3:
-; GFX11-NEXT: s_branch .LBB3_2
; GFX11-NEXT: .LBB3_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
@@ -630,6 +644,7 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -677,12 +692,15 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; VI-LABEL: bitcast_v3i32_to_v12i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB5_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 8
@@ -732,12 +750,15 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB5_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB5_2
+; VI-NEXT: s_branch .LBB5_3
;
; GFX9-LABEL: bitcast_v3i32_to_v12i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
@@ -787,16 +808,19 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB5_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB5_2
+; GFX9-NEXT: s_branch .LBB5_3
;
; GFX11-LABEL: bitcast_v3i32_to_v12i8_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s14, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB5_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_lshr_b32 s3, s2, 16
+; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
; GFX11-NEXT: s_lshr_b32 s8, s2, 8
; GFX11-NEXT: s_lshr_b32 s9, s1, 24
; GFX11-NEXT: s_lshr_b32 s10, s1, 16
@@ -804,9 +828,7 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; GFX11-NEXT: s_lshr_b32 s12, s0, 16
; GFX11-NEXT: s_lshr_b32 s13, s0, 8
; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
-; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s14
-; GFX11-NEXT: s_cbranch_vccnz .LBB5_3
+; GFX11-NEXT: s_cbranch_execnz .LBB5_3
; GFX11-NEXT: .LBB5_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
@@ -838,7 +860,9 @@ define inreg <12 x i8> @bitcast_v3i32_to_v12i8_scalar(<3 x i32> inreg %a, i32 in
; GFX11-NEXT: ; implicit-def: $sgpr8
; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB5_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB5_2
+; GFX11-NEXT: s_branch .LBB5_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1320,6 +1344,7 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -1397,12 +1422,15 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: bitcast_v12i8_to_v3i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB7_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -1480,12 +1508,15 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB7_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; VI-NEXT: s_branch .LBB7_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB7_2
+; VI-NEXT: s_branch .LBB7_3
;
; GFX9-LABEL: bitcast_v12i8_to_v3i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB7_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -1563,44 +1594,45 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB7_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; GFX9-NEXT: s_branch .LBB7_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB7_2
+; GFX9-NEXT: s_branch .LBB7_3
;
; GFX11-LABEL: bitcast_v12i8_to_v3i32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
-; GFX11-NEXT: s_mov_b32 s7, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB7_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s8
+; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_lshl_b32 s6, s17, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s8, s9
-; GFX11-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_and_b32 s10, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s23, 8
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s8, s9, s10
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s8, s9
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
-; GFX11-NEXT: s_cbranch_vccnz .LBB7_3
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_cbranch_execnz .LBB7_3
; GFX11-NEXT: .LBB7_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -1647,7 +1679,9 @@ define inreg <3 x i32> @bitcast_v12i8_to_v3i32_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB7_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; GFX11-NEXT: s_branch .LBB7_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
+; GFX11-NEXT: s_cbranch_vccz .LBB7_2
+; GFX11-NEXT: s_branch .LBB7_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -1780,6 +1814,7 @@ define inreg <6 x bfloat> @bitcast_v3i32_to_v6bf16_scalar(<3 x i32> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB9_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s18, 0xffff0000
@@ -1814,66 +1849,73 @@ define inreg <6 x bfloat> @bitcast_v3i32_to_v6bf16_scalar(<3 x i32> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB9_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB9_2
+; SI-NEXT: s_branch .LBB9_3
;
; VI-LABEL: bitcast_v3i32_to_v6bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB9_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB9_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB9_3
-; VI-NEXT: .LBB9_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB9_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB9_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB9_3: ; %end
+; VI-NEXT: .LBB9_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB9_4:
-; VI-NEXT: s_branch .LBB9_2
;
; GFX9-LABEL: bitcast_v3i32_to_v6bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB9_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB9_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB9_3
-; GFX9-NEXT: .LBB9_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB9_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB9_3: ; %end
+; GFX9-NEXT: .LBB9_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB9_4:
-; GFX9-NEXT: s_branch .LBB9_2
;
; GFX11-LABEL: bitcast_v3i32_to_v6bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB9_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB9_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB9_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB9_3
-; GFX11-NEXT: .LBB9_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB9_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB9_3: ; %end
+; GFX11-NEXT: .LBB9_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB9_4:
-; GFX11-NEXT: s_branch .LBB9_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2234,6 +2276,7 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v6, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s21
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s20
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB11_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v7
@@ -2266,16 +2309,22 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB11_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; SI-NEXT: s_branch .LBB11_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB11_2
+; SI-NEXT: s_branch .LBB11_3
;
; VI-LABEL: bitcast_v6bf16_to_v3i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB11_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB11_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB11_4
-; VI-NEXT: .LBB11_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB11_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB11_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -2332,8 +2381,6 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v3, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB11_3:
-; VI-NEXT: s_branch .LBB11_2
; VI-NEXT: .LBB11_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2344,10 +2391,14 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB11_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB11_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB11_4
-; GFX9-NEXT: .LBB11_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB11_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB11_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s18
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -2408,8 +2459,6 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_and_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB11_3:
-; GFX9-NEXT: s_branch .LBB11_2
; GFX9-NEXT: .LBB11_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2420,12 +2469,15 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB11_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB11_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB11_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB11_4
-; GFX11-NEXT: .LBB11_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s2
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
@@ -2493,8 +2545,6 @@ define inreg <3 x i32> @bitcast_v6bf16_to_v3i32_scalar(<6 x bfloat> inreg %a, i3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshl_or_b32 v0, v4, 16, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB11_3:
-; GFX11-NEXT: s_branch .LBB11_2
; GFX11-NEXT: .LBB11_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
@@ -2637,6 +2687,7 @@ define inreg <6 x half> @bitcast_v3i32_to_v6f16_scalar(<3 x i32> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB13_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s18, 16
@@ -2671,66 +2722,73 @@ define inreg <6 x half> @bitcast_v3i32_to_v6f16_scalar(<3 x i32> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB13_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB13_2
+; SI-NEXT: s_branch .LBB13_3
;
; VI-LABEL: bitcast_v3i32_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB13_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB13_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB13_3
-; VI-NEXT: .LBB13_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB13_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB13_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB13_3: ; %end
+; VI-NEXT: .LBB13_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB13_4:
-; VI-NEXT: s_branch .LBB13_2
;
; GFX9-LABEL: bitcast_v3i32_to_v6f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB13_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB13_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB13_3
-; GFX9-NEXT: .LBB13_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB13_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB13_3: ; %end
+; GFX9-NEXT: .LBB13_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB13_4:
-; GFX9-NEXT: s_branch .LBB13_2
;
; GFX11-LABEL: bitcast_v3i32_to_v6f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB13_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB13_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB13_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB13_3
-; GFX11-NEXT: .LBB13_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB13_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB13_3: ; %end
+; GFX11-NEXT: .LBB13_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB13_4:
-; GFX11-NEXT: s_branch .LBB13_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -2894,6 +2952,7 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v4, s21
; SI-NEXT: v_cvt_f16_f32_e32 v3, s20
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB15_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v8
@@ -2932,16 +2991,22 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB15_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; SI-NEXT: s_branch .LBB15_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB15_2
+; SI-NEXT: s_branch .LBB15_3
;
; VI-LABEL: bitcast_v6f16_to_v3i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB15_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB15_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB15_4
-; VI-NEXT: .LBB15_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB15_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB15_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -2959,8 +3024,6 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB15_3:
-; VI-NEXT: s_branch .LBB15_2
; VI-NEXT: .LBB15_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -2971,17 +3034,19 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB15_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB15_4
-; GFX9-NEXT: .LBB15_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB15_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB15_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB15_3:
-; GFX9-NEXT: s_branch .LBB15_2
; GFX9-NEXT: .LBB15_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -2992,18 +3057,19 @@ define inreg <3 x i32> @bitcast_v6f16_to_v3i32_scalar(<6 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB15_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB15_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB15_4
-; GFX11-NEXT: .LBB15_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB15_3:
-; GFX11-NEXT: s_branch .LBB15_2
; GFX11-NEXT: .LBB15_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
@@ -3127,6 +3193,7 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB17_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -3154,66 +3221,73 @@ define inreg <6 x i16> @bitcast_v3i32_to_v6i16_scalar(<3 x i32> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB17_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB17_2
+; SI-NEXT: s_branch .LBB17_3
;
; VI-LABEL: bitcast_v3i32_to_v6i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB17_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB17_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB17_3
-; VI-NEXT: .LBB17_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB17_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB17_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s18, s18, 3
; VI-NEXT: s_add_i32 s17, s17, 3
; VI-NEXT: s_add_i32 s16, s16, 3
-; VI-NEXT: .LBB17_3: ; %end
+; VI-NEXT: .LBB17_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB17_4:
-; VI-NEXT: s_branch .LBB17_2
;
; GFX9-LABEL: bitcast_v3i32_to_v6i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB17_4
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB17_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB17_3
-; GFX9-NEXT: .LBB17_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB17_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_add_i32 s18, s18, 3
; GFX9-NEXT: s_add_i32 s17, s17, 3
; GFX9-NEXT: s_add_i32 s16, s16, 3
-; GFX9-NEXT: .LBB17_3: ; %end
+; GFX9-NEXT: .LBB17_4: ; %end
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
; GFX9-NEXT: v_mov_b32_e32 v2, s18
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB17_4:
-; GFX9-NEXT: s_branch .LBB17_2
;
; GFX11-LABEL: bitcast_v3i32_to_v6i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB17_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB17_4
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB17_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB17_3
-; GFX11-NEXT: .LBB17_2: ; %cmp.true
+; GFX11-NEXT: s_cbranch_vccnz .LBB17_4
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_add_i32 s2, s2, 3
; GFX11-NEXT: s_add_i32 s1, s1, 3
; GFX11-NEXT: s_add_i32 s0, s0, 3
-; GFX11-NEXT: .LBB17_3: ; %end
+; GFX11-NEXT: .LBB17_4: ; %end
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB17_4:
-; GFX11-NEXT: s_branch .LBB17_2
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -3357,6 +3431,7 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB19_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -3392,16 +3467,22 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB19_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB19_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB19_2
+; SI-NEXT: s_branch .LBB19_3
;
; VI-LABEL: bitcast_v6i16_to_v3i32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB19_3
-; VI-NEXT: .LBB19_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s18, 3
; VI-NEXT: s_and_b32 s4, s18, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -3417,28 +3498,28 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB19_3: ; %end
+; VI-NEXT: .LBB19_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-LABEL: bitcast_v6i16_to_v3i32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB19_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB19_4
-; GFX9-NEXT: .LBB19_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB19_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB19_3:
-; GFX9-NEXT: s_branch .LBB19_2
; GFX9-NEXT: .LBB19_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -3449,18 +3530,19 @@ define inreg <3 x i32> @bitcast_v6i16_to_v3i32_scalar(<6 x i16> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB19_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB19_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB19_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB19_4
-; GFX11-NEXT: .LBB19_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB19_3:
-; GFX11-NEXT: s_branch .LBB19_2
; GFX11-NEXT: .LBB19_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
@@ -3756,6 +3838,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB21_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -3794,7 +3877,8 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB21_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB21_2
; SI-NEXT: .LBB21_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v4, s17
@@ -3808,6 +3892,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB21_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 8
@@ -3844,7 +3929,8 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB21_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB21_2
; VI-NEXT: .LBB21_4:
; VI-NEXT: v_mov_b32_e32 v13, s16
; VI-NEXT: v_mov_b32_e32 v14, s17
@@ -3867,6 +3953,7 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB21_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
@@ -3903,7 +3990,8 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB21_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB21_2
; GFX9-NEXT: .LBB21_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s16
; GFX9-NEXT: v_mov_b32_e32 v14, s17
@@ -3926,20 +4014,19 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB21_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
-; GFX11-NEXT: s_lshr_b32 s9, s2, 8
-; GFX11-NEXT: s_lshr_b32 s10, s1, 24
-; GFX11-NEXT: s_lshr_b32 s11, s1, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 8
-; GFX11-NEXT: s_lshr_b32 s12, s0, 16
-; GFX11-NEXT: s_lshr_b32 s14, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_lshr_b32 s3, s2, 16
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB21_4
+; GFX11-NEXT: s_lshr_b32 s8, s2, 8
+; GFX11-NEXT: s_lshr_b32 s9, s1, 24
+; GFX11-NEXT: s_lshr_b32 s10, s1, 16
+; GFX11-NEXT: s_lshr_b32 s12, s1, 8
+; GFX11-NEXT: s_lshr_b32 s11, s0, 16
+; GFX11-NEXT: s_lshr_b32 s13, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB21_4
; GFX11-NEXT: .LBB21_2: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v8, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v14, s1, 1.0
@@ -3956,22 +4043,23 @@ define inreg <12 x i8> @bitcast_v3f32_to_v12i8_scalar(<3 x float> inreg %a, i32
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v13
; GFX11-NEXT: s_branch .LBB21_5
; GFX11-NEXT: .LBB21_3:
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB21_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB21_2
; GFX11-NEXT: .LBB21_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s0 :: v_dual_mov_b32 v14, s1
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s14
-; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v5, s13
-; GFX11-NEXT: v_dual_mov_b32 v6, s11 :: v_dual_mov_b32 v7, s10
-; GFX11-NEXT: v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s8
+; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s13
+; GFX11-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v5, s12
+; GFX11-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s9
+; GFX11-NEXT: v_dual_mov_b32 v9, s8 :: v_dual_mov_b32 v10, s3
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
; GFX11-NEXT: .LBB21_5: ; %end
@@ -4459,6 +4547,7 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB23_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -4536,12 +4625,15 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB23_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB23_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB23_2
+; SI-NEXT: s_branch .LBB23_3
;
; VI-LABEL: bitcast_v12i8_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB23_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -4619,12 +4711,15 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB23_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; VI-NEXT: s_branch .LBB23_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB23_2
+; VI-NEXT: s_branch .LBB23_3
;
; GFX9-LABEL: bitcast_v12i8_to_v3f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB23_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -4702,44 +4797,45 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB23_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; GFX9-NEXT: s_branch .LBB23_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB23_2
+; GFX9-NEXT: s_branch .LBB23_3
;
; GFX11-LABEL: bitcast_v12i8_to_v3f32_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
-; GFX11-NEXT: s_mov_b32 s7, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB23_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
; GFX11-NEXT: s_lshl_b32 s5, s1, 8
; GFX11-NEXT: s_and_b32 s6, s2, 0xff
-; GFX11-NEXT: s_lshl_b32 s8, s3, 8
+; GFX11-NEXT: s_lshl_b32 s7, s3, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
-; GFX11-NEXT: s_or_b32 s5, s6, s8
+; GFX11-NEXT: s_or_b32 s5, s6, s7
; GFX11-NEXT: s_and_b32 s4, s4, 0xffff
; GFX11-NEXT: s_lshl_b32 s5, s5, 16
; GFX11-NEXT: s_lshl_b32 s6, s17, 8
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
-; GFX11-NEXT: s_and_b32 s8, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_and_b32 s7, s18, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s8, s9
-; GFX11-NEXT: s_and_b32 s8, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_and_b32 s10, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s23, 8
-; GFX11-NEXT: s_or_b32 s8, s8, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_and_b32 s7, s20, 0xff
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s8, s9, s10
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
-; GFX11-NEXT: s_and_b32 s8, s8, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s8, s9
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
-; GFX11-NEXT: s_cbranch_vccnz .LBB23_3
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_cbranch_execnz .LBB23_3
; GFX11-NEXT: .LBB23_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -4786,7 +4882,9 @@ define inreg <3 x float> @bitcast_v12i8_to_v3f32_scalar(<12 x i8> inreg %a, i32
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB23_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; GFX11-NEXT: s_branch .LBB23_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
+; GFX11-NEXT: s_cbranch_vccz .LBB23_2
+; GFX11-NEXT: s_branch .LBB23_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -4918,6 +5016,7 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB25_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s6, s18, 0xffff0000
@@ -4945,7 +5044,8 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr7
; SI-NEXT: ; implicit-def: $sgpr6
-; SI-NEXT: s_branch .LBB25_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB25_2
; SI-NEXT: .LBB25_4:
; SI-NEXT: v_mov_b32_e32 v0, s11
; SI-NEXT: v_mov_b32_e32 v1, s10
@@ -4959,16 +5059,18 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB25_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB25_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB25_4
-; VI-NEXT: .LBB25_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB25_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB25_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB25_3:
-; VI-NEXT: s_branch .LBB25_2
; VI-NEXT: .LBB25_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -4980,16 +5082,18 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB25_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB25_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB25_4
-; GFX9-NEXT: .LBB25_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB25_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB25_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB25_3:
-; GFX9-NEXT: s_branch .LBB25_2
; GFX9-NEXT: .LBB25_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5001,18 +5105,19 @@ define inreg <6 x bfloat> @bitcast_v3f32_to_v6bf16_scalar(<3 x float> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB25_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB25_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB25_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB25_4
-; GFX11-NEXT: .LBB25_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB25_3:
-; GFX11-NEXT: s_branch .LBB25_2
; GFX11-NEXT: .LBB25_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -5377,6 +5482,7 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; SI-NEXT: v_mul_f32_e64 v6, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v3, 1.0, s21
; SI-NEXT: v_mul_f32_e64 v4, 1.0, s20
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB27_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v7
@@ -5409,16 +5515,22 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB27_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; SI-NEXT: s_branch .LBB27_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB27_2
+; SI-NEXT: s_branch .LBB27_3
;
; VI-LABEL: bitcast_v6bf16_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB27_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB27_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB27_4
-; VI-NEXT: .LBB27_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB27_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB27_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -5475,8 +5587,6 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; VI-NEXT: v_lshrrev_b32_e32 v0, 16, v0
; VI-NEXT: v_alignbit_b32 v0, v0, v3, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB27_3:
-; VI-NEXT: s_branch .LBB27_2
; VI-NEXT: .LBB27_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5487,10 +5597,14 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB27_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB27_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB27_4
-; GFX9-NEXT: .LBB27_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB27_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB27_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s18
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -5551,8 +5665,6 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX9-NEXT: v_and_b32_sdwa v0, v3, v0 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v4, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB27_3:
-; GFX9-NEXT: s_branch .LBB27_2
; GFX9-NEXT: .LBB27_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5563,12 +5675,15 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB27_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB27_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB27_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB27_4
-; GFX11-NEXT: .LBB27_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s2
; GFX11-NEXT: s_lshl_b32 s2, s2, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
@@ -5636,8 +5751,6 @@ define inreg <3 x float> @bitcast_v6bf16_to_v3f32_scalar(<6 x bfloat> inreg %a,
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-NEXT: v_lshl_or_b32 v0, v4, 16, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB27_3:
-; GFX11-NEXT: s_branch .LBB27_2
; GFX11-NEXT: .LBB27_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
@@ -5779,6 +5892,7 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB29_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshr_b32 s4, s18, 16
@@ -5813,22 +5927,26 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB29_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB29_2
+; SI-NEXT: s_branch .LBB29_3
;
; VI-LABEL: bitcast_v3f32_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB29_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB29_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB29_4
-; VI-NEXT: .LBB29_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB29_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB29_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB29_3:
-; VI-NEXT: s_branch .LBB29_2
; VI-NEXT: .LBB29_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -5840,16 +5958,18 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB29_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB29_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB29_4
-; GFX9-NEXT: .LBB29_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB29_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB29_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB29_3:
-; GFX9-NEXT: s_branch .LBB29_2
; GFX9-NEXT: .LBB29_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -5861,18 +5981,19 @@ define inreg <6 x half> @bitcast_v3f32_to_v6f16_scalar(<3 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB29_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB29_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB29_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB29_4
-; GFX11-NEXT: .LBB29_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB29_3:
-; GFX11-NEXT: s_branch .LBB29_2
; GFX11-NEXT: .LBB29_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6040,6 +6161,7 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; SI-NEXT: v_cvt_f16_f32_e32 v4, s21
; SI-NEXT: v_cvt_f16_f32_e32 v3, s20
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB31_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v8
@@ -6078,16 +6200,22 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB31_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2
-; SI-NEXT: s_branch .LBB31_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB31_2
+; SI-NEXT: s_branch .LBB31_3
;
; VI-LABEL: bitcast_v6f16_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB31_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB31_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB31_4
-; VI-NEXT: .LBB31_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB31_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB31_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s18, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x200
; VI-NEXT: v_mov_b32_e32 v1, s4
@@ -6105,8 +6233,6 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; VI-NEXT: v_add_f16_e32 v0, s16, v0
; VI-NEXT: v_or_b32_e32 v0, v0, v3
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB31_3:
-; VI-NEXT: s_branch .LBB31_2
; VI-NEXT: .LBB31_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6117,17 +6243,19 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB31_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB31_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB31_4
-; GFX9-NEXT: .LBB31_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB31_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB31_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB31_3:
-; GFX9-NEXT: s_branch .LBB31_2
; GFX9-NEXT: .LBB31_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6138,18 +6266,19 @@ define inreg <3 x float> @bitcast_v6f16_to_v3f32_scalar(<6 x half> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB31_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB31_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB31_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB31_4
-; GFX11-NEXT: .LBB31_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB31_3:
-; GFX11-NEXT: s_branch .LBB31_2
; GFX11-NEXT: .LBB31_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
@@ -6272,6 +6401,7 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s19, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB33_3
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_mov_b32_e32 v0, s18
@@ -6292,7 +6422,8 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; SI-NEXT: ; implicit-def: $vgpr1
; SI-NEXT: ; implicit-def: $sgpr6
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB33_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB33_2
; SI-NEXT: .LBB33_4:
; SI-NEXT: v_mov_b32_e32 v0, s16
; SI-NEXT: v_mov_b32_e32 v2, s17
@@ -6304,16 +6435,18 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB33_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB33_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB33_4
-; VI-NEXT: .LBB33_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB33_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB33_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: v_add_f32_e64 v2, s18, 1.0
; VI-NEXT: v_add_f32_e64 v1, s17, 1.0
; VI-NEXT: v_add_f32_e64 v0, s16, 1.0
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB33_3:
-; VI-NEXT: s_branch .LBB33_2
; VI-NEXT: .LBB33_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -6325,16 +6458,18 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB33_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB33_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB33_4
-; GFX9-NEXT: .LBB33_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB33_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB33_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_add_f32_e64 v2, s18, 1.0
; GFX9-NEXT: v_add_f32_e64 v1, s17, 1.0
; GFX9-NEXT: v_add_f32_e64 v0, s16, 1.0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB33_3:
-; GFX9-NEXT: s_branch .LBB33_2
; GFX9-NEXT: .LBB33_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6346,18 +6481,19 @@ define inreg <6 x i16> @bitcast_v3f32_to_v6i16_scalar(<3 x float> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB33_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB33_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB33_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB33_4
-; GFX11-NEXT: .LBB33_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_add_f32_e64 v2, s2, 1.0
; GFX11-NEXT: v_add_f32_e64 v1, s1, 1.0
; GFX11-NEXT: v_add_f32_e64 v0, s0, 1.0
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB33_3:
-; GFX11-NEXT: s_branch .LBB33_2
; GFX11-NEXT: .LBB33_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -6505,6 +6641,7 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB35_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -6540,16 +6677,22 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; SI-NEXT: s_setpc_b64 s[30:31]
; SI-NEXT: .LBB35_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6
-; SI-NEXT: s_branch .LBB35_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB35_2
+; SI-NEXT: s_branch .LBB35_3
;
; VI-LABEL: bitcast_v6i16_to_v3f32_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB35_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB35_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB35_3
-; VI-NEXT: .LBB35_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB35_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB35_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s18, 3
; VI-NEXT: s_and_b32 s4, s18, 0xffff0000
; VI-NEXT: s_and_b32 s5, s5, 0xffff
@@ -6565,28 +6708,28 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; VI-NEXT: s_and_b32 s5, s5, 0xffff
; VI-NEXT: s_or_b32 s4, s4, s5
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB35_3: ; %end
+; VI-NEXT: .LBB35_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB35_4:
-; VI-NEXT: s_branch .LBB35_2
;
; GFX9-LABEL: bitcast_v6i16_to_v3f32_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB35_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB35_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB35_4
-; GFX9-NEXT: .LBB35_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB35_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB35_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB35_3:
-; GFX9-NEXT: s_branch .LBB35_2
; GFX9-NEXT: .LBB35_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -6597,18 +6740,19 @@ define inreg <3 x float> @bitcast_v6i16_to_v3f32_scalar(<6 x i16> inreg %a, i32
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s3, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB35_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s3, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB35_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB35_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
; GFX11-NEXT: s_cbranch_vccnz .LBB35_4
-; GFX11-NEXT: .LBB35_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB35_3:
-; GFX11-NEXT: s_branch .LBB35_2
; GFX11-NEXT: .LBB35_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_mov_b32_e32 v2, s2
@@ -7104,6 +7248,7 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB37_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -7192,12 +7337,15 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr10
; SI-NEXT: ; implicit-def: $sgpr11
-; SI-NEXT: s_branch .LBB37_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB37_2
+; SI-NEXT: s_branch .LBB37_3
;
; VI-LABEL: bitcast_v12i8_to_v6bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB37_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -7275,12 +7423,15 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB37_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB37_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB37_2
+; VI-NEXT: s_branch .LBB37_3
;
; GFX9-LABEL: bitcast_v12i8_to_v6bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB37_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -7358,13 +7509,15 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB37_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB37_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB37_2
+; GFX9-NEXT: s_branch .LBB37_3
;
; GFX11-LABEL: bitcast_v12i8_to_v6bf16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB37_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -7379,23 +7532,22 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
; GFX11-NEXT: s_and_b32 s7, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
+; GFX11-NEXT: s_or_b32 s6, s7, s8
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_and_b32 s10, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s8, s9, s10
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB37_3
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_cbranch_execnz .LBB37_3
; GFX11-NEXT: .LBB37_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -7442,7 +7594,9 @@ define inreg <6 x bfloat> @bitcast_v12i8_to_v6bf16_scalar(<12 x i8> inreg %a, i3
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB37_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB37_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB37_2
+; GFX11-NEXT: s_branch .LBB37_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -7987,6 +8141,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v15, 1.0, s18
; SI-NEXT: v_mul_f32_e64 v12, 1.0, s21
; SI-NEXT: v_mul_f32_e64 v13, 1.0, s20
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB39_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v16
@@ -8044,12 +8199,15 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr10
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB39_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB39_2
+; SI-NEXT: s_branch .LBB39_3
;
; VI-LABEL: bitcast_v6bf16_to_v12i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB39_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 8
@@ -8139,7 +8297,8 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB39_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB39_2
; VI-NEXT: .LBB39_4:
; VI-NEXT: v_mov_b32_e32 v14, s16
; VI-NEXT: v_mov_b32_e32 v15, s17
@@ -8163,6 +8322,7 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB39_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s17, 16
@@ -8258,7 +8418,8 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: ; implicit-def: $sgpr14
; GFX9-NEXT: ; implicit-def: $sgpr15
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB39_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB39_2
; GFX9-NEXT: .LBB39_4:
; GFX9-NEXT: v_mov_b32_e32 v8, s18
; GFX9-NEXT: v_mov_b32_e32 v0, s16
@@ -8278,20 +8439,19 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB39_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s13, s2, 16
-; GFX11-NEXT: s_lshr_b32 s12, s2, 8
-; GFX11-NEXT: s_lshr_b32 s8, s1, 24
-; GFX11-NEXT: s_lshr_b32 s14, s1, 16
-; GFX11-NEXT: s_lshr_b32 s9, s1, 8
-; GFX11-NEXT: s_lshr_b32 s11, s0, 16
-; GFX11-NEXT: s_lshr_b32 s10, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_lshr_b32 s3, s1, 24
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB39_4
+; GFX11-NEXT: s_lshr_b32 s12, s2, 16
+; GFX11-NEXT: s_lshr_b32 s11, s2, 8
+; GFX11-NEXT: s_lshr_b32 s13, s1, 16
+; GFX11-NEXT: s_lshr_b32 s8, s1, 8
+; GFX11-NEXT: s_lshr_b32 s10, s0, 16
+; GFX11-NEXT: s_lshr_b32 s9, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB39_4
; GFX11-NEXT: .LBB39_2: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s1
; GFX11-NEXT: s_lshl_b32 s1, s1, 16
@@ -8374,22 +8534,23 @@ define inreg <12 x i8> @bitcast_v6bf16_to_v12i8_scalar(<6 x bfloat> inreg %a, i3
; GFX11-NEXT: v_mov_b32_e32 v4, v13
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB39_3:
+; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr10
-; GFX11-NEXT: ; implicit-def: $sgpr11
; GFX11-NEXT: ; implicit-def: $sgpr4
-; GFX11-NEXT: ; implicit-def: $sgpr9
-; GFX11-NEXT: ; implicit-def: $sgpr14
; GFX11-NEXT: ; implicit-def: $sgpr8
-; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr13
+; GFX11-NEXT: ; implicit-def: $sgpr3
+; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB39_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB39_2
; GFX11-NEXT: .LBB39_4:
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s12
-; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s10
-; GFX11-NEXT: v_dual_mov_b32 v6, s14 :: v_dual_mov_b32 v7, s8
-; GFX11-NEXT: v_dual_mov_b32 v10, s13 :: v_dual_mov_b32 v5, s9
-; GFX11-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v11, s6
+; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v9, s11
+; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s9
+; GFX11-NEXT: v_dual_mov_b32 v6, s13 :: v_dual_mov_b32 v7, s3
+; GFX11-NEXT: v_dual_mov_b32 v10, s12 :: v_dual_mov_b32 v5, s8
+; GFX11-NEXT: v_dual_mov_b32 v2, s10 :: v_dual_mov_b32 v11, s6
; GFX11-NEXT: v_dual_mov_b32 v3, s4 :: v_dual_mov_b32 v4, s1
; GFX11-NEXT: s_setpc_b64 s[30:31]
%cmp = icmp eq i32 %b, 0
@@ -8870,6 +9031,7 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB41_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xff
@@ -8943,12 +9105,15 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB41_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB41_2
+; SI-NEXT: s_branch .LBB41_3
;
; VI-LABEL: bitcast_v12i8_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB41_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -9026,12 +9191,15 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB41_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB41_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB41_2
+; VI-NEXT: s_branch .LBB41_3
;
; GFX9-LABEL: bitcast_v12i8_to_v6f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB41_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -9109,13 +9277,15 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB41_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB41_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB41_2
+; GFX9-NEXT: s_branch .LBB41_3
;
; GFX11-LABEL: bitcast_v12i8_to_v6f16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB41_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -9130,23 +9300,22 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
; GFX11-NEXT: s_and_b32 s7, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
+; GFX11-NEXT: s_or_b32 s6, s7, s8
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_and_b32 s10, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s8, s9, s10
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB41_3
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_cbranch_execnz .LBB41_3
; GFX11-NEXT: .LBB41_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -9193,7 +9362,9 @@ define inreg <6 x half> @bitcast_v12i8_to_v6f16_scalar(<12 x i8> inreg %a, i32 i
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB41_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB41_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB41_2
+; GFX11-NEXT: s_branch .LBB41_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -9537,6 +9708,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v10, s21
; SI-NEXT: v_cvt_f16_f32_e32 v12, s20
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB43_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v15
@@ -9598,12 +9770,15 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr8
; SI-NEXT: ; implicit-def: $vgpr9
; SI-NEXT: ; implicit-def: $vgpr11
-; SI-NEXT: s_branch .LBB43_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB43_2
+; SI-NEXT: s_branch .LBB43_3
;
; VI-LABEL: bitcast_v6f16_to_v12i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB43_3
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 16
@@ -9652,7 +9827,8 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr14
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB43_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB43_2
; VI-NEXT: .LBB43_4:
; VI-NEXT: v_mov_b32_e32 v2, s19
; VI-NEXT: v_mov_b32_e32 v6, s15
@@ -9672,6 +9848,7 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB43_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
@@ -9710,7 +9887,8 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB43_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB43_2
; GFX9-NEXT: .LBB43_4:
; GFX9-NEXT: v_mov_b32_e32 v14, s16
; GFX9-NEXT: v_mov_b32_e32 v15, s17
@@ -9734,20 +9912,19 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB43_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
-; GFX11-NEXT: s_lshr_b32 s9, s2, 8
-; GFX11-NEXT: s_lshr_b32 s10, s1, 24
-; GFX11-NEXT: s_lshr_b32 s11, s1, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 8
-; GFX11-NEXT: s_lshr_b32 s12, s0, 16
-; GFX11-NEXT: s_lshr_b32 s14, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_lshr_b32 s3, s2, 16
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB43_4
+; GFX11-NEXT: s_lshr_b32 s8, s2, 8
+; GFX11-NEXT: s_lshr_b32 s9, s1, 24
+; GFX11-NEXT: s_lshr_b32 s10, s1, 16
+; GFX11-NEXT: s_lshr_b32 s12, s1, 8
+; GFX11-NEXT: s_lshr_b32 s11, s0, 16
+; GFX11-NEXT: s_lshr_b32 s13, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB43_4
; GFX11-NEXT: .LBB43_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v15, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v14, 0x200, s0 op_sel_hi:[0,1]
@@ -9766,22 +9943,23 @@ define inreg <12 x i8> @bitcast_v6f16_to_v12i8_scalar(<6 x half> inreg %a, i32 i
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v14
; GFX11-NEXT: s_branch .LBB43_5
; GFX11-NEXT: .LBB43_3:
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB43_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB43_2
; GFX11-NEXT: .LBB43_4:
; GFX11-NEXT: v_dual_mov_b32 v14, s0 :: v_dual_mov_b32 v15, s1
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s14
-; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v5, s13
-; GFX11-NEXT: v_dual_mov_b32 v6, s11 :: v_dual_mov_b32 v7, s10
-; GFX11-NEXT: v_dual_mov_b32 v13, s9 :: v_dual_mov_b32 v10, s8
+; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s13
+; GFX11-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v5, s12
+; GFX11-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s9
+; GFX11-NEXT: v_dual_mov_b32 v13, s8 :: v_dual_mov_b32 v10, s3
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
; GFX11-NEXT: .LBB43_5: ; %end
@@ -10284,6 +10462,7 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s28, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB45_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s20, 0xff
@@ -10376,12 +10555,15 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB45_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB45_2
+; SI-NEXT: s_branch .LBB45_3
;
; VI-LABEL: bitcast_v12i8_to_v6i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s28, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB45_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_and_b32 s4, s16, 0xff
@@ -10459,12 +10641,15 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; VI-NEXT: s_setpc_b64 s[30:31]
; VI-NEXT: .LBB45_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; VI-NEXT: s_branch .LBB45_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB45_2
+; VI-NEXT: s_branch .LBB45_3
;
; GFX9-LABEL: bitcast_v12i8_to_v6i16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s28, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB45_4
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_and_b32 s4, s16, 0xff
@@ -10542,13 +10727,15 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB45_4:
; GFX9-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX9-NEXT: s_branch .LBB45_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB45_2
+; GFX9-NEXT: s_branch .LBB45_3
;
; GFX11-LABEL: bitcast_v12i8_to_v6i16_scalar:
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s24, 0
-; GFX11-NEXT: s_mov_b32 s8, 0
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB45_4
; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_and_b32 s4, s0, 0xff
@@ -10563,23 +10750,22 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: s_or_b32 s4, s4, s5
; GFX11-NEXT: s_and_b32 s5, s16, 0xff
; GFX11-NEXT: s_and_b32 s7, s18, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s19, 8
+; GFX11-NEXT: s_lshl_b32 s8, s19, 8
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
+; GFX11-NEXT: s_or_b32 s6, s7, s8
; GFX11-NEXT: s_and_b32 s7, s20, 0xff
-; GFX11-NEXT: s_lshl_b32 s9, s21, 8
-; GFX11-NEXT: s_and_b32 s10, s22, 0xff
-; GFX11-NEXT: s_lshl_b32 s11, s23, 8
-; GFX11-NEXT: s_or_b32 s7, s7, s9
-; GFX11-NEXT: s_or_b32 s9, s10, s11
+; GFX11-NEXT: s_lshl_b32 s8, s21, 8
+; GFX11-NEXT: s_and_b32 s9, s22, 0xff
+; GFX11-NEXT: s_lshl_b32 s10, s23, 8
+; GFX11-NEXT: s_or_b32 s7, s7, s8
+; GFX11-NEXT: s_or_b32 s8, s9, s10
; GFX11-NEXT: s_and_b32 s5, s5, 0xffff
; GFX11-NEXT: s_lshl_b32 s6, s6, 16
; GFX11-NEXT: s_and_b32 s7, s7, 0xffff
-; GFX11-NEXT: s_lshl_b32 s9, s9, 16
+; GFX11-NEXT: s_lshl_b32 s8, s8, 16
; GFX11-NEXT: s_or_b32 s5, s5, s6
-; GFX11-NEXT: s_or_b32 s6, s7, s9
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
-; GFX11-NEXT: s_cbranch_vccnz .LBB45_3
+; GFX11-NEXT: s_or_b32 s6, s7, s8
+; GFX11-NEXT: s_cbranch_execnz .LBB45_3
; GFX11-NEXT: .LBB45_2: ; %cmp.true
; GFX11-NEXT: s_add_i32 s0, s0, 3
; GFX11-NEXT: s_add_i32 s2, s2, 3
@@ -10626,7 +10812,9 @@ define inreg <6 x i16> @bitcast_v12i8_to_v6i16_scalar(<12 x i8> inreg %a, i32 in
; GFX11-NEXT: s_setpc_b64 s[30:31]
; GFX11-NEXT: .LBB45_4:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5_sgpr6_sgpr7
-; GFX11-NEXT: s_branch .LBB45_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB45_2
+; GFX11-NEXT: s_branch .LBB45_3
%cmp = icmp eq i32 %b, 0
br i1 %cmp, label %cmp.true, label %cmp.false
@@ -10962,6 +11150,7 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB47_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_and_b32 s4, s16, 0xffff
@@ -11034,12 +11223,15 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; SI-NEXT: ; implicit-def: $sgpr12
; SI-NEXT: ; implicit-def: $sgpr13
; SI-NEXT: ; implicit-def: $sgpr14
-; SI-NEXT: s_branch .LBB47_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB47_2
+; SI-NEXT: s_branch .LBB47_3
;
; VI-LABEL: bitcast_v6i16_to_v12i8_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB47_4
; VI-NEXT: ; %bb.1: ; %cmp.false
; VI-NEXT: s_lshr_b32 s19, s16, 8
@@ -11101,12 +11293,15 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; VI-NEXT: ; implicit-def: $sgpr11
; VI-NEXT: ; implicit-def: $sgpr10
; VI-NEXT: ; implicit-def: $sgpr6
-; VI-NEXT: s_branch .LBB47_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB47_2
+; VI-NEXT: s_branch .LBB47_3
;
; GFX9-LABEL: bitcast_v6i16_to_v12i8_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
+; GFX9-NEXT: s_mov_b64 s[8:9], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB47_3
; GFX9-NEXT: ; %bb.1: ; %cmp.false
; GFX9-NEXT: s_lshr_b32 s19, s16, 8
@@ -11143,7 +11338,8 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX9-NEXT: ; implicit-def: $sgpr11
; GFX9-NEXT: ; implicit-def: $sgpr10
; GFX9-NEXT: ; implicit-def: $sgpr6
-; GFX9-NEXT: s_branch .LBB47_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GFX9-NEXT: s_cbranch_vccz .LBB47_2
; GFX9-NEXT: .LBB47_4:
; GFX9-NEXT: v_mov_b32_e32 v13, s16
; GFX9-NEXT: v_mov_b32_e32 v14, s17
@@ -11166,20 +11362,19 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
-; GFX11-NEXT: s_mov_b32 s3, 0
+; GFX11-NEXT: s_mov_b32 s5, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB47_3
; GFX11-NEXT: ; %bb.1: ; %cmp.false
-; GFX11-NEXT: s_lshr_b32 s8, s2, 16
-; GFX11-NEXT: s_lshr_b32 s9, s2, 8
-; GFX11-NEXT: s_lshr_b32 s10, s1, 24
-; GFX11-NEXT: s_lshr_b32 s11, s1, 16
-; GFX11-NEXT: s_lshr_b32 s13, s1, 8
-; GFX11-NEXT: s_lshr_b32 s12, s0, 16
-; GFX11-NEXT: s_lshr_b32 s14, s0, 8
-; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_lshr_b32 s3, s2, 16
; GFX11-NEXT: s_lshr_b64 s[4:5], s[0:1], 24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s3
-; GFX11-NEXT: s_cbranch_vccnz .LBB47_4
+; GFX11-NEXT: s_lshr_b32 s8, s2, 8
+; GFX11-NEXT: s_lshr_b32 s9, s1, 24
+; GFX11-NEXT: s_lshr_b32 s10, s1, 16
+; GFX11-NEXT: s_lshr_b32 s12, s1, 8
+; GFX11-NEXT: s_lshr_b32 s11, s0, 16
+; GFX11-NEXT: s_lshr_b32 s13, s0, 8
+; GFX11-NEXT: s_lshr_b64 s[6:7], s[2:3], 24
+; GFX11-NEXT: s_cbranch_execnz .LBB47_4
; GFX11-NEXT: .LBB47_2: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v8, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v14, s1, 3 op_sel_hi:[1,0]
@@ -11196,22 +11391,23 @@ define inreg <12 x i8> @bitcast_v6i16_to_v12i8_scalar(<6 x i16> inreg %a, i32 in
; GFX11-NEXT: v_lshrrev_b32_e32 v1, 8, v13
; GFX11-NEXT: s_branch .LBB47_5
; GFX11-NEXT: .LBB47_3:
-; GFX11-NEXT: ; implicit-def: $sgpr14
-; GFX11-NEXT: ; implicit-def: $sgpr12
-; GFX11-NEXT: ; implicit-def: $sgpr4
; GFX11-NEXT: ; implicit-def: $sgpr13
; GFX11-NEXT: ; implicit-def: $sgpr11
+; GFX11-NEXT: ; implicit-def: $sgpr4
+; GFX11-NEXT: ; implicit-def: $sgpr12
; GFX11-NEXT: ; implicit-def: $sgpr10
; GFX11-NEXT: ; implicit-def: $sgpr9
; GFX11-NEXT: ; implicit-def: $sgpr8
+; GFX11-NEXT: ; implicit-def: $sgpr3
; GFX11-NEXT: ; implicit-def: $sgpr6
-; GFX11-NEXT: s_branch .LBB47_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s5
+; GFX11-NEXT: s_cbranch_vccz .LBB47_2
; GFX11-NEXT: .LBB47_4:
; GFX11-NEXT: v_dual_mov_b32 v13, s0 :: v_dual_mov_b32 v14, s1
-; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s14
-; GFX11-NEXT: v_dual_mov_b32 v2, s12 :: v_dual_mov_b32 v5, s13
-; GFX11-NEXT: v_dual_mov_b32 v6, s11 :: v_dual_mov_b32 v7, s10
-; GFX11-NEXT: v_dual_mov_b32 v9, s9 :: v_dual_mov_b32 v10, s8
+; GFX11-NEXT: v_dual_mov_b32 v8, s2 :: v_dual_mov_b32 v1, s13
+; GFX11-NEXT: v_dual_mov_b32 v2, s11 :: v_dual_mov_b32 v5, s12
+; GFX11-NEXT: v_dual_mov_b32 v6, s10 :: v_dual_mov_b32 v7, s9
+; GFX11-NEXT: v_dual_mov_b32 v9, s8 :: v_dual_mov_b32 v10, s3
; GFX11-NEXT: v_mov_b32_e32 v11, s6
; GFX11-NEXT: v_mov_b32_e32 v3, s4
; GFX11-NEXT: .LBB47_5: ; %end
@@ -11602,6 +11798,7 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v10, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v11, 1.0, s21
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB49_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v6
@@ -11651,16 +11848,22 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB49_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB49_2
+; SI-NEXT: s_branch .LBB49_3
;
; VI-LABEL: bitcast_v6bf16_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB49_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB49_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB49_4
-; VI-NEXT: .LBB49_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB49_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB49_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -11717,8 +11920,6 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; VI-NEXT: v_alignbit_b32 v1, v5, v1, 16
; VI-NEXT: v_alignbit_b32 v0, v4, v3, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB49_3:
-; VI-NEXT: s_branch .LBB49_2
; VI-NEXT: .LBB49_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -11730,10 +11931,14 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB49_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB49_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB49_4
-; GFX9-NEXT: .LBB49_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB49_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB49_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -11794,8 +11999,6 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX9-NEXT: v_and_b32_sdwa v0, v6, v4 dst_sel:DWORD dst_unused:UNUSED_PAD src0_sel:DWORD src1_sel:WORD_1
; GFX9-NEXT: v_lshl_or_b32 v0, v3, 16, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB49_3:
-; GFX9-NEXT: s_branch .LBB49_2
; GFX9-NEXT: .LBB49_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -11807,12 +12010,15 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB49_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB49_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB49_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB49_4
-; GFX11-NEXT: .LBB49_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
@@ -11880,8 +12086,6 @@ define inreg <6 x half> @bitcast_v6bf16_to_v6f16_scalar(<6 x bfloat> inreg %a, i
; GFX11-NEXT: v_lshrrev_b32_e32 v4, 16, v4
; GFX11-NEXT: v_lshl_or_b32 v2, v4, 16, v5
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB49_3:
-; GFX11-NEXT: s_branch .LBB49_2
; GFX11-NEXT: .LBB49_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -12054,6 +12258,7 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; SI-NEXT: v_cvt_f16_f32_e32 v10, s20
; SI-NEXT: v_cvt_f16_f32_e32 v11, s21
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB51_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshlrev_b32_e32 v0, 16, v6
@@ -12097,16 +12302,22 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB51_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB51_2
+; SI-NEXT: s_branch .LBB51_3
;
; VI-LABEL: bitcast_v6f16_to_v6bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB51_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB51_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB51_4
-; VI-NEXT: .LBB51_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB51_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB51_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -12124,8 +12335,6 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; VI-NEXT: v_or_b32_e32 v1, v1, v5
; VI-NEXT: v_or_b32_e32 v0, v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB51_3:
-; VI-NEXT: s_branch .LBB51_2
; VI-NEXT: .LBB51_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -12137,17 +12346,19 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB51_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB51_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB51_4
-; GFX9-NEXT: .LBB51_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB51_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB51_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB51_3:
-; GFX9-NEXT: s_branch .LBB51_2
; GFX9-NEXT: .LBB51_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12159,18 +12370,19 @@ define inreg <6 x bfloat> @bitcast_v6f16_to_v6bf16_scalar(<6 x half> inreg %a, i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB51_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB51_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB51_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB51_4
-; GFX11-NEXT: .LBB51_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB51_3:
-; GFX11-NEXT: s_branch .LBB51_2
; GFX11-NEXT: .LBB51_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -12553,6 +12765,7 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: v_mul_f32_e64 v6, 1.0, s19
; SI-NEXT: v_mul_f32_e64 v9, 1.0, s20
; SI-NEXT: v_mul_f32_e64 v8, 1.0, s21
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB53_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_lshrrev_b32_e32 v0, 16, v11
@@ -12592,16 +12805,22 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB53_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB53_2
+; SI-NEXT: s_branch .LBB53_3
;
; VI-LABEL: bitcast_v6bf16_to_v6i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB53_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB53_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB53_4
-; VI-NEXT: .LBB53_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB53_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB53_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshl_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v0, 0x40c00000
; VI-NEXT: v_add_f32_e32 v1, s4, v0
@@ -12658,8 +12877,6 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; VI-NEXT: v_alignbit_b32 v1, v5, v1, 16
; VI-NEXT: v_alignbit_b32 v0, v4, v3, 16
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB53_3:
-; VI-NEXT: s_branch .LBB53_2
; VI-NEXT: .LBB53_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -12671,10 +12888,14 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB53_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB53_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB53_4
-; GFX9-NEXT: .LBB53_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB53_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB53_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: s_pack_lh_b32_b16 s4, 0, s16
; GFX9-NEXT: v_mov_b32_e32 v0, 0x40c00000
; GFX9-NEXT: v_add_f32_e32 v1, s4, v0
@@ -12732,8 +12953,6 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX9-NEXT: v_lshrrev_b32_e32 v0, 16, v4
; GFX9-NEXT: v_and_or_b32 v0, v3, v6, v0
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB53_3:
-; GFX9-NEXT: s_branch .LBB53_2
; GFX9-NEXT: .LBB53_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -12745,12 +12964,15 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB53_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB53_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB53_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB53_4
-; GFX11-NEXT: .LBB53_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: s_pack_lh_b32_b16 s3, 0, s0
; GFX11-NEXT: s_lshl_b32 s0, s0, 16
; GFX11-NEXT: v_add_f32_e64 v0, 0x40c00000, s3
@@ -12809,8 +13031,6 @@ define inreg <6 x i16> @bitcast_v6bf16_to_v6i16_scalar(<6 x bfloat> inreg %a, i3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3)
; GFX11-NEXT: v_and_or_b32 v0, 0xffff0000, v0, v7
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB53_3:
-; GFX11-NEXT: s_branch .LBB53_2
; GFX11-NEXT: .LBB53_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -12961,6 +13181,7 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB55_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: s_lshl_b32 s6, s16, 16
@@ -13007,16 +13228,22 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; SI-NEXT: ; implicit-def: $sgpr9
; SI-NEXT: ; implicit-def: $sgpr11
; SI-NEXT: ; implicit-def: $sgpr10
-; SI-NEXT: s_branch .LBB55_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB55_2
+; SI-NEXT: s_branch .LBB55_3
;
; VI-LABEL: bitcast_v6i16_to_v6bf16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB55_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB55_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB55_3
-; VI-NEXT: .LBB55_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB55_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB55_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_add_i32 s9, s18, 3
@@ -13032,28 +13259,28 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB55_3: ; %end
+; VI-NEXT: .LBB55_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB55_4:
-; VI-NEXT: s_branch .LBB55_2
;
; GFX9-LABEL: bitcast_v6i16_to_v6bf16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB55_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB55_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB55_4
-; GFX9-NEXT: .LBB55_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB55_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB55_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB55_3:
-; GFX9-NEXT: s_branch .LBB55_2
; GFX9-NEXT: .LBB55_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13065,18 +13292,19 @@ define inreg <6 x bfloat> @bitcast_v6i16_to_v6bf16_scalar(<6 x i16> inreg %a, i3
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB55_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB55_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB55_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB55_4
-; GFX11-NEXT: .LBB55_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB55_3:
-; GFX11-NEXT: s_branch .LBB55_2
; GFX11-NEXT: .LBB55_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -13225,10 +13453,14 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_cvt_f16_f32_e32 v4, s20
; SI-NEXT: v_cvt_f16_f32_e32 v5, s21
; SI-NEXT: s_cmp_lg_u32 s22, 0
-; SI-NEXT: s_cbranch_scc0 .LBB57_4
+; SI-NEXT: s_mov_b64 s[4:5], -1
+; SI-NEXT: s_cbranch_scc0 .LBB57_2
; SI-NEXT: ; %bb.1: ; %cmp.false
-; SI-NEXT: s_cbranch_execnz .LBB57_3
-; SI-NEXT: .LBB57_2: ; %cmp.true
+; SI-NEXT: s_mov_b64 s[4:5], 0
+; SI-NEXT: .LBB57_2: ; %Flow
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccnz .LBB57_4
+; SI-NEXT: ; %bb.3: ; %cmp.true
; SI-NEXT: v_cvt_f32_f16_e32 v5, v5
; SI-NEXT: v_cvt_f32_f16_e32 v4, v4
; SI-NEXT: v_cvt_f32_f16_e32 v3, v3
@@ -13254,19 +13486,21 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; SI-NEXT: v_or_b32_e32 v2, v2, v6
; SI-NEXT: v_or_b32_e32 v0, v0, v1
; SI-NEXT: v_alignbit_b32 v1, v2, v1, 16
-; SI-NEXT: .LBB57_3: ; %end
+; SI-NEXT: .LBB57_4: ; %end
; SI-NEXT: s_setpc_b64 s[30:31]
-; SI-NEXT: .LBB57_4:
-; SI-NEXT: s_branch .LBB57_2
;
; VI-LABEL: bitcast_v6f16_to_v6i16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB57_3
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB57_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB57_4
-; VI-NEXT: .LBB57_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB57_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB57_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_lshr_b32 s4, s16, 16
; VI-NEXT: v_mov_b32_e32 v1, s4
; VI-NEXT: s_lshr_b32 s4, s17, 16
@@ -13284,8 +13518,6 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; VI-NEXT: v_or_b32_e32 v1, v1, v5
; VI-NEXT: v_or_b32_e32 v0, v3, v4
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB57_3:
-; VI-NEXT: s_branch .LBB57_2
; VI-NEXT: .LBB57_4:
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
@@ -13297,17 +13529,19 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB57_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB57_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB57_4
-; GFX9-NEXT: .LBB57_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB57_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB57_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_mov_b32_e32 v0, 0x200
; GFX9-NEXT: v_pk_add_f16 v2, s18, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v1, s17, v0 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_f16 v0, s16, v0 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB57_3:
-; GFX9-NEXT: s_branch .LBB57_2
; GFX9-NEXT: .LBB57_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13319,18 +13553,19 @@ define inreg <6 x i16> @bitcast_v6f16_to_v6i16_scalar(<6 x half> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB57_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB57_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB57_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB57_4
-; GFX11-NEXT: .LBB57_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_f16 v2, 0x200, s2 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v1, 0x200, s1 op_sel_hi:[0,1]
; GFX11-NEXT: v_pk_add_f16 v0, 0x200, s0 op_sel_hi:[0,1]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB57_3:
-; GFX11-NEXT: s_branch .LBB57_2
; GFX11-NEXT: .LBB57_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
@@ -13484,6 +13719,7 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s22, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB59_4
; SI-NEXT: ; %bb.1: ; %cmp.false
; SI-NEXT: v_cvt_f32_f16_e32 v0, s16
@@ -13515,16 +13751,22 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_branch .LBB59_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB59_2
+; SI-NEXT: s_branch .LBB59_3
;
; VI-LABEL: bitcast_v6i16_to_v6f16_scalar:
; VI: ; %bb.0:
; VI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s19, 0
-; VI-NEXT: s_cbranch_scc0 .LBB59_4
+; VI-NEXT: s_mov_b64 s[4:5], -1
+; VI-NEXT: s_cbranch_scc0 .LBB59_2
; VI-NEXT: ; %bb.1: ; %cmp.false
-; VI-NEXT: s_cbranch_execnz .LBB59_3
-; VI-NEXT: .LBB59_2: ; %cmp.true
+; VI-NEXT: s_mov_b64 s[4:5], 0
+; VI-NEXT: .LBB59_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccnz .LBB59_4
+; VI-NEXT: ; %bb.3: ; %cmp.true
; VI-NEXT: s_add_i32 s5, s16, 3
; VI-NEXT: s_add_i32 s7, s17, 3
; VI-NEXT: s_add_i32 s9, s18, 3
@@ -13540,28 +13782,28 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; VI-NEXT: s_add_i32 s18, s8, 0x30000
; VI-NEXT: s_add_i32 s17, s6, 0x30000
; VI-NEXT: s_add_i32 s16, s4, 0x30000
-; VI-NEXT: .LBB59_3: ; %end
+; VI-NEXT: .LBB59_4: ; %end
; VI-NEXT: v_mov_b32_e32 v0, s16
; VI-NEXT: v_mov_b32_e32 v1, s17
; VI-NEXT: v_mov_b32_e32 v2, s18
; VI-NEXT: s_setpc_b64 s[30:31]
-; VI-NEXT: .LBB59_4:
-; VI-NEXT: s_branch .LBB59_2
;
; GFX9-LABEL: bitcast_v6i16_to_v6f16_scalar:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s19, 0
-; GFX9-NEXT: s_cbranch_scc0 .LBB59_3
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
+; GFX9-NEXT: s_cbranch_scc0 .LBB59_2
; GFX9-NEXT: ; %bb.1: ; %cmp.false
-; GFX9-NEXT: s_cbranch_execnz .LBB59_4
-; GFX9-NEXT: .LBB59_2: ; %cmp.true
+; GFX9-NEXT: s_mov_b64 s[4:5], 0
+; GFX9-NEXT: .LBB59_2: ; %Flow
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccnz .LBB59_4
+; GFX9-NEXT: ; %bb.3: ; %cmp.true
; GFX9-NEXT: v_pk_add_u16 v2, s18, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v1, s17, 3 op_sel_hi:[1,0]
; GFX9-NEXT: v_pk_add_u16 v0, s16, 3 op_sel_hi:[1,0]
; GFX9-NEXT: s_setpc_b64 s[30:31]
-; GFX9-NEXT: .LBB59_3:
-; GFX9-NEXT: s_branch .LBB59_2
; GFX9-NEXT: .LBB59_4:
; GFX9-NEXT: v_mov_b32_e32 v0, s16
; GFX9-NEXT: v_mov_b32_e32 v1, s17
@@ -13573,18 +13815,19 @@ define inreg <6 x half> @bitcast_v6i16_to_v6f16_scalar(<6 x i16> inreg %a, i32 i
; GFX11: ; %bb.0:
; GFX11-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s3, 0
+; GFX11-NEXT: s_mov_b32 s4, -1
+; GFX11-NEXT: s_cbranch_scc0 .LBB59_2
+; GFX11-NEXT: ; %bb.1: ; %cmp.false
; GFX11-NEXT: s_mov_b32 s4, 0
-; GFX11-NEXT: s_cbranch_scc0 .LBB59_3
-; GFX11-NEXT: ; %bb.1: ; %Flow
+; GFX11-NEXT: .LBB59_2: ; %Flow
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
; GFX11-NEXT: s_cbranch_vccnz .LBB59_4
-; GFX11-NEXT: .LBB59_2: ; %cmp.true
+; GFX11-NEXT: ; %bb.3: ; %cmp.true
; GFX11-NEXT: v_pk_add_u16 v2, s2, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v1, s1, 3 op_sel_hi:[1,0]
; GFX11-NEXT: v_pk_add_u16 v0, s0, 3 op_sel_hi:[1,0]
; GFX11-NEXT: s_setpc_b64 s[30:31]
-; GFX11-NEXT: .LBB59_3:
-; GFX11-NEXT: s_branch .LBB59_2
; GFX11-NEXT: .LBB59_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
; GFX11-NEXT: v_dual_mov_b32 v2, s2 :: v_dual_mov_b32 v3, s3
diff --git a/llvm/test/CodeGen/AMDGPU/blender-no-live-segment-at-def-implicit-def.ll b/llvm/test/CodeGen/AMDGPU/blender-no-live-segment-at-def-implicit-def.ll
index ad0d6d8..72fc420 100644
--- a/llvm/test/CodeGen/AMDGPU/blender-no-live-segment-at-def-implicit-def.ll
+++ b/llvm/test/CodeGen/AMDGPU/blender-no-live-segment-at-def-implicit-def.ll
@@ -9,44 +9,37 @@ define amdgpu_kernel void @blender_no_live_segment_at_def_error(<4 x float> %ext
; CHECK-NEXT: s_addc_u32 s13, s13, 0
; CHECK-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_LO), s12
; CHECK-NEXT: s_setreg_b32 hwreg(HW_REG_FLAT_SCR_HI), s13
-; CHECK-NEXT: s_load_dwordx8 s[48:55], s[8:9], 0x0
+; CHECK-NEXT: s_load_dwordx8 s[20:27], s[8:9], 0x0
; CHECK-NEXT: s_add_u32 s0, s0, s17
; CHECK-NEXT: s_addc_u32 s1, s1, 0
-; CHECK-NEXT: s_mov_b32 s12, 0
+; CHECK-NEXT: s_mov_b32 s36, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
-; CHECK-NEXT: s_cmp_lg_u32 s52, 0
+; CHECK-NEXT: s_cmp_lg_u32 s24, 0
; CHECK-NEXT: s_cbranch_scc1 .LBB0_9
; CHECK-NEXT: ; %bb.1: ; %if.end13.i.i
-; CHECK-NEXT: s_cmp_eq_u32 s54, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB0_4
+; CHECK-NEXT: s_cmp_eq_u32 s26, 0
+; CHECK-NEXT: s_cbranch_scc1 .LBB0_7
; CHECK-NEXT: ; %bb.2: ; %if.else251.i.i
-; CHECK-NEXT: s_cmp_lg_u32 s55, 0
+; CHECK-NEXT: s_cmp_lg_u32 s27, 0
; CHECK-NEXT: s_mov_b32 s17, 0
; CHECK-NEXT: s_cselect_b32 s12, -1, 0
; CHECK-NEXT: s_and_b32 vcc_lo, exec_lo, s12
-; CHECK-NEXT: s_cbranch_vccz .LBB0_5
+; CHECK-NEXT: s_cbranch_vccz .LBB0_4
; CHECK-NEXT: ; %bb.3:
; CHECK-NEXT: s_mov_b32 s18, 0
-; CHECK-NEXT: s_branch .LBB0_6
-; CHECK-NEXT: .LBB0_4:
-; CHECK-NEXT: s_mov_b32 s14, s12
-; CHECK-NEXT: s_mov_b32 s15, s12
-; CHECK-NEXT: s_mov_b32 s13, s12
-; CHECK-NEXT: s_mov_b64 s[50:51], s[14:15]
-; CHECK-NEXT: s_mov_b64 s[48:49], s[12:13]
-; CHECK-NEXT: s_branch .LBB0_8
-; CHECK-NEXT: .LBB0_5: ; %if.then263.i.i
-; CHECK-NEXT: v_cmp_lt_f32_e64 s12, s53, 0
+; CHECK-NEXT: s_branch .LBB0_5
+; CHECK-NEXT: .LBB0_4: ; %if.then263.i.i
+; CHECK-NEXT: v_cmp_lt_f32_e64 s12, s25, 0
; CHECK-NEXT: s_mov_b32 s18, 1.0
; CHECK-NEXT: s_mov_b32 s17, 0x7fc00000
-; CHECK-NEXT: .LBB0_6: ; %Flow
-; CHECK-NEXT: s_mov_b32 s48, 1.0
+; CHECK-NEXT: .LBB0_5: ; %Flow
+; CHECK-NEXT: s_mov_b32 s36, 1.0
; CHECK-NEXT: s_andn2_b32 vcc_lo, exec_lo, s12
-; CHECK-NEXT: s_mov_b32 s49, s48
-; CHECK-NEXT: s_mov_b32 s50, s48
-; CHECK-NEXT: s_mov_b32 s51, s48
+; CHECK-NEXT: s_mov_b32 s37, s36
+; CHECK-NEXT: s_mov_b32 s38, s36
+; CHECK-NEXT: s_mov_b32 s39, s36
; CHECK-NEXT: s_cbranch_vccnz .LBB0_8
-; CHECK-NEXT: ; %bb.7: ; %if.end273.i.i
+; CHECK-NEXT: ; %bb.6: ; %if.end273.i.i
; CHECK-NEXT: s_add_u32 s12, s8, 40
; CHECK-NEXT: s_addc_u32 s13, s9, 0
; CHECK-NEXT: s_getpc_b64 s[20:21]
@@ -65,26 +58,29 @@ define amdgpu_kernel void @blender_no_live_segment_at_def_error(<4 x float> %ext
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: s_mov_b32 s13, s15
; CHECK-NEXT: s_mov_b32 s14, s16
-; CHECK-NEXT: s_mov_b32 s48, 0
+; CHECK-NEXT: s_mov_b32 s36, 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_swappc_b64 s[30:31], s[20:21]
; CHECK-NEXT: s_mov_b64 s[8:9], s[34:35]
-; CHECK-NEXT: s_mov_b32 s49, s48
-; CHECK-NEXT: s_mov_b32 s50, s48
-; CHECK-NEXT: s_mov_b32 s51, s48
+; CHECK-NEXT: .LBB0_7: ; %if.end294.i.i
+; CHECK-NEXT: s_mov_b32 s37, s36
+; CHECK-NEXT: s_mov_b32 s38, s36
+; CHECK-NEXT: s_mov_b32 s39, s36
; CHECK-NEXT: .LBB0_8: ; %if.end294.i.i
; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: s_mov_b64 s[20:21], s[36:37]
+; CHECK-NEXT: s_mov_b64 s[22:23], s[38:39]
; CHECK-NEXT: buffer_store_dword v0, off, s[0:3], 0 offset:12
; CHECK-NEXT: buffer_store_dword v0, off, s[0:3], 0 offset:8
; CHECK-NEXT: buffer_store_dword v0, off, s[0:3], 0 offset:4
; CHECK-NEXT: buffer_store_dword v0, off, s[0:3], 0
; CHECK-NEXT: .LBB0_9: ; %kernel_direct_lighting.exit
; CHECK-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x20
-; CHECK-NEXT: v_mov_b32_e32 v0, s48
+; CHECK-NEXT: v_mov_b32_e32 v0, s20
; CHECK-NEXT: v_mov_b32_e32 v4, 0
-; CHECK-NEXT: v_mov_b32_e32 v1, s49
-; CHECK-NEXT: v_mov_b32_e32 v2, s50
-; CHECK-NEXT: v_mov_b32_e32 v3, s51
+; CHECK-NEXT: v_mov_b32_e32 v1, s21
+; CHECK-NEXT: v_mov_b32_e32 v2, s22
+; CHECK-NEXT: v_mov_b32_e32 v3, s23
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: global_store_dwordx4 v4, v[0:3], s[4:5]
; CHECK-NEXT: s_endpgm
diff --git a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll
index 006fe51..33bf045 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-folding-implicit-def-subreg.ll
@@ -32,12 +32,12 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.2, implicit $vcc
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.1.bb103:
- ; GFX90A-NEXT: successors: %bb.58(0x40000000), %bb.2(0x40000000)
+ ; GFX90A-NEXT: successors: %bb.57(0x40000000), %bb.2(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr2_vgpr3:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 0
; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr30_sgpr31, implicit-def dead $scc
- ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.58, implicit $vcc
+ ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.57, implicit $vcc
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.2:
; GFX90A-NEXT: successors: %bb.3(0x80000000)
@@ -51,14 +51,14 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 0
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.3.Flow17:
- ; GFX90A-NEXT: successors: %bb.4(0x40000000), %bb.57(0x40000000)
+ ; GFX90A-NEXT: successors: %bb.4(0x40000000), %bb.56(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr2_vgpr3:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr30 = V_AND_B32_e32 1023, $vgpr31, implicit $exec
; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr34_sgpr35, implicit-def dead $scc
; GFX90A-NEXT: renamable $vgpr15 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec
; GFX90A-NEXT: renamable $vgpr17 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec
- ; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.57, implicit $vcc
+ ; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.56, implicit $vcc
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.4.bb15:
; GFX90A-NEXT: successors: %bb.35(0x40000000), %bb.5(0x40000000)
@@ -72,13 +72,13 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr40, renamable $vcc = V_ADD_CO_U32_e64 $vgpr46, killed $vgpr0, 0, implicit $exec
; GFX90A-NEXT: renamable $vgpr41, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr47, killed $vcc, 0, implicit $exec
; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr30_sgpr31, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 -1
; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.35, implicit $vcc
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.5:
; GFX90A-NEXT: successors: %bb.6(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr18_sgpr19
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 -1
; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0
; GFX90A-NEXT: renamable $sgpr42_sgpr43 = S_MOV_B64 0
; GFX90A-NEXT: renamable $sgpr40_sgpr41 = S_MOV_B64 0
@@ -122,12 +122,12 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr24 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.7.Flow19:
- ; GFX90A-NEXT: successors: %bb.62(0x40000000), %bb.8(0x40000000)
+ ; GFX90A-NEXT: successors: %bb.61(0x40000000), %bb.8(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr68_sgpr69 = S_MOV_B64 0
; GFX90A-NEXT: $sgpr24_sgpr25 = S_AND_SAVEEXEC_B64 $sgpr36_sgpr37, implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.62, implicit $exec
+ ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.61, implicit $exec
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.8.Flow32:
; GFX90A-NEXT: successors: %bb.9(0x40000000), %bb.10(0x40000000)
@@ -557,11 +557,11 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.43.bb55:
; GFX90A-NEXT: successors: %bb.48(0x40000000), %bb.44(0x40000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr44_sgpr45, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57, $sgpr48_sgpr49
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr44_sgpr45, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr56_sgpr57, $sgpr48_sgpr49
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: S_BITCMP1_B32 killed renamable $sgpr17, 16, implicit-def $scc
- ; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_CSELECT_B64 -1, 0, implicit killed $scc
- ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_XOR_B64 renamable $sgpr64_sgpr65, -1, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_CSELECT_B64 -1, 0, implicit killed $scc
+ ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_XOR_B64 renamable $sgpr62_sgpr63, -1, implicit-def dead $scc
; GFX90A-NEXT: renamable $vgpr62 = V_ADD_CO_U32_e32 6144, $vgpr40, implicit-def $vcc, implicit $exec
; GFX90A-NEXT: renamable $vgpr63, dead renamable $vcc = V_ADDC_U32_e64 0, $vgpr41, killed $vcc, 0, implicit $exec
; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, renamable $sgpr50_sgpr51, implicit-def dead $scc
@@ -569,9 +569,9 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.44:
; GFX90A-NEXT: successors: %bb.45(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr57, $vgpr56, $vgpr18, $vgpr30, $vgpr31, $vgpr60, $vgpr62, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $vgpr61, $vgpr58, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr47, $vgpr46, $vgpr2, $vgpr3, $vgpr45, $vgpr44, $vgpr43, $vgpr42, $vgpr41, $vgpr40, $vgpr63
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr57, $vgpr56, $vgpr18, $vgpr30, $vgpr31, $vgpr60, $vgpr62, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8, $sgpr9, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr52_sgpr53, $vgpr61, $vgpr58, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr20_sgpr21_sgpr22, $sgpr22_sgpr23, $sgpr24_sgpr25_sgpr26, $sgpr26_sgpr27, $vgpr47, $vgpr46, $vgpr2, $vgpr3, $vgpr45, $vgpr44, $vgpr43, $vgpr42, $vgpr41, $vgpr40, $vgpr63
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = COPY renamable $sgpr36_sgpr37
; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr4_vgpr5 = IMPLICIT_DEF
@@ -596,15 +596,15 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $sgpr66_sgpr67 = S_AND_B64 killed renamable $sgpr50_sgpr51, $exec, implicit-def dead $scc
; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_AND_B64 killed renamable $sgpr56_sgpr57, $exec, implicit-def dead $scc
; GFX90A-NEXT: renamable $sgpr64_sgpr65 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc
- ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc
; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc
- ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc
; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_OR_B64 killed renamable $sgpr44_sgpr45, killed renamable $sgpr46_sgpr47, implicit-def dead $scc
; GFX90A-NEXT: S_BRANCH %bb.47
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.46.bb48:
; GFX90A-NEXT: successors: %bb.43(0x40000000), %bb.47(0x40000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr64_sgpr65, $sgpr50_sgpr51, $sgpr66_sgpr67, $sgpr44_sgpr45, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr64_sgpr65, $sgpr50_sgpr51, $sgpr66_sgpr67, $sgpr44_sgpr45, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr56_sgpr57
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr60 = V_ADD_CO_U32_e32 5120, $vgpr40, implicit-def $vcc, implicit $exec
; GFX90A-NEXT: renamable $sgpr18_sgpr19 = COPY $vcc
@@ -653,34 +653,29 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: S_BRANCH %bb.42
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: bb.48.bb63:
- ; GFX90A-NEXT: successors: %bb.50(0x40000000), %bb.49(0x40000000)
- ; GFX90A-NEXT: liveins: $vcc, $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57, $sgpr48_sgpr49
+ ; GFX90A-NEXT: successors: %bb.49(0x40000000), %bb.44(0x40000000)
+ ; GFX90A-NEXT: liveins: $vcc, $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr56_sgpr57
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr44_sgpr45 = S_MOV_B64 0
- ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.50, implicit $vcc
- ; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.49:
- ; GFX90A-NEXT: successors: %bb.44(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57
- ; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_MOV_B64 -1
- ; GFX90A-NEXT: S_BRANCH %bb.44
+ ; GFX90A-NEXT: S_CBRANCH_VCCZ %bb.44, implicit $vcc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.50.bb68:
- ; GFX90A-NEXT: successors: %bb.54(0x40000000), %bb.51(0x40000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr48_sgpr49, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57
+ ; GFX90A-NEXT: bb.49.bb68:
+ ; GFX90A-NEXT: successors: %bb.53(0x40000000), %bb.50(0x40000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr50_sgpr51, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr56_sgpr57
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr0 = nuw nsw V_LSHLREV_B32_e32 3, $vgpr30, implicit $exec
; GFX90A-NEXT: renamable $vgpr1 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec
+ ; GFX90A-NEXT: renamable $sgpr48_sgpr49 = S_MOV_B64 0
; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr50_sgpr51, implicit-def dead $scc
- ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.54, implicit $vcc
+ ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 -1
+ ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.53, implicit $vcc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.51:
+ ; GFX90A-NEXT: bb.50:
; GFX90A-NEXT: successors: %bb.45(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr56_sgpr57
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr56_sgpr57
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 -1
- ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = COPY renamable $sgpr36_sgpr37
; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr6_vgpr7 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr4_vgpr5 = IMPLICIT_DEF
@@ -694,23 +689,23 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr11 = IMPLICIT_DEF implicit-def $vgpr10
; GFX90A-NEXT: S_BRANCH %bb.45
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.52.bb80:
- ; GFX90A-NEXT: successors: %bb.59(0x40000000), %bb.53(0x40000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: bb.51.bb80:
+ ; GFX90A-NEXT: successors: %bb.58(0x40000000), %bb.52(0x40000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr17 = S_BFE_U32 renamable $sgpr20, 65560, implicit-def dead $scc
; GFX90A-NEXT: S_CMP_EQ_U32 killed renamable $sgpr17, 0, implicit-def $scc
; GFX90A-NEXT: renamable $vgpr6 = V_ADD_CO_U32_e32 4096, $vgpr0, implicit-def $vcc, implicit $exec
; GFX90A-NEXT: renamable $vgpr7, dead renamable $sgpr52_sgpr53 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec
- ; GFX90A-NEXT: S_CBRANCH_SCC1 %bb.59, implicit killed $scc
+ ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_MOV_B64 0
+ ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 -1
+ ; GFX90A-NEXT: S_CBRANCH_SCC1 %bb.58, implicit killed $scc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.53:
- ; GFX90A-NEXT: successors: %bb.61(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: bb.52:
+ ; GFX90A-NEXT: successors: %bb.60(0x80000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_MOV_B64 0
- ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 -1
- ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = COPY renamable $sgpr36_sgpr37
; GFX90A-NEXT: renamable $vgpr8_vgpr9 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF
@@ -720,17 +715,17 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12
; GFX90A-NEXT: renamable $vgpr11 = IMPLICIT_DEF implicit-def $vgpr10
- ; GFX90A-NEXT: S_BRANCH %bb.61
+ ; GFX90A-NEXT: S_BRANCH %bb.60
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.54.bb73:
- ; GFX90A-NEXT: successors: %bb.52(0x40000000), %bb.55(0x40000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr54_sgpr55
+ ; GFX90A-NEXT: bb.53.bb73:
+ ; GFX90A-NEXT: successors: %bb.51(0x40000000), %bb.54(0x40000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003F, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3, $sgpr52_sgpr53
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr6 = FLAT_LOAD_UBYTE renamable $vgpr0_vgpr1, 2048, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i76)
; GFX90A-NEXT: renamable $vgpr4 = V_ADD_CO_U32_e32 2048, $vgpr0, implicit-def $vcc, implicit $exec
; GFX90A-NEXT: renamable $sgpr50_sgpr51 = S_MOV_B64 0
; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_MOV_B64 -1
- ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = COPY renamable $sgpr36_sgpr37
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = COPY renamable $sgpr36_sgpr37
; GFX90A-NEXT: renamable $vgpr5, dead renamable $sgpr58_sgpr59 = V_ADDC_U32_e64 0, 0, killed $vcc, 0, implicit $exec
; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr6, implicit $exec
; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0
@@ -745,20 +740,20 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12
; GFX90A-NEXT: renamable $vgpr11 = IMPLICIT_DEF implicit-def $vgpr10
; GFX90A-NEXT: $sgpr60_sgpr61 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.52, implicit $exec
+ ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.51, implicit $exec
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.55.Flow29:
+ ; GFX90A-NEXT: bb.54.Flow29:
; GFX90A-NEXT: successors: %bb.45(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr18, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr60_sgpr61, implicit-def $scc
; GFX90A-NEXT: S_BRANCH %bb.45
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.56.bb90:
- ; GFX90A-NEXT: successors: %bb.60(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: bb.55.bb90:
+ ; GFX90A-NEXT: successors: %bb.59(0x80000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr56_sgpr57, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: renamable $vgpr53 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $sgpr64_sgpr65, implicit $exec
+ ; GFX90A-NEXT: renamable $vgpr53 = V_CNDMASK_B32_e64 0, 0, 0, 1, killed $sgpr62_sgpr63, implicit $exec
; GFX90A-NEXT: renamable $vgpr10 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec
; GFX90A-NEXT: renamable $vgpr14_vgpr15 = DS_READ_B64_gfx9 killed renamable $vgpr10, 0, 0, implicit $exec :: (load (s64) from `ptr addrspace(3) null`, addrspace 3)
; GFX90A-NEXT: renamable $vgpr10 = COPY renamable $sgpr21, implicit $exec
@@ -771,11 +766,11 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr17 = V_CNDMASK_B32_e64 0, 0, 0, 1, $sgpr12_sgpr13, implicit $exec
; GFX90A-NEXT: renamable $vgpr15 = V_ALIGNBIT_B32_opsel_e64 0, $vgpr15, 0, $vgpr14, 0, 1, 0, 0, implicit $exec
; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_XOR_B64 $exec, -1, implicit-def dead $scc
- ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = S_OR_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_OR_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc
; GFX90A-NEXT: renamable $vgpr10 = COPY renamable $vgpr14, implicit $exec
- ; GFX90A-NEXT: S_BRANCH %bb.60
+ ; GFX90A-NEXT: S_BRANCH %bb.59
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.57:
+ ; GFX90A-NEXT: bb.56:
; GFX90A-NEXT: successors: %bb.7(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr36_sgpr37, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr2_vgpr3:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
@@ -810,7 +805,7 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $sgpr34_sgpr35 = S_MOV_B64 0
; GFX90A-NEXT: S_BRANCH %bb.7
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.58.bb105:
+ ; GFX90A-NEXT: bb.57.bb105:
; GFX90A-NEXT: successors: %bb.3(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $sgpr17, $sgpr33, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr46_sgpr47:0x000000000000000F, $sgpr20_sgpr21_sgpr22_sgpr23:0x00000000000000FF, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000FF, $vgpr2_vgpr3:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
@@ -827,16 +822,16 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 -1
; GFX90A-NEXT: S_BRANCH %bb.3
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.59.bb85:
- ; GFX90A-NEXT: successors: %bb.56(0x40000000), %bb.60(0x40000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr64_sgpr65, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: bb.58.bb85:
+ ; GFX90A-NEXT: successors: %bb.55(0x40000000), %bb.59(0x40000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr18, $vgpr30, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47:0x000000000000000F, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr8 = V_OR_B32_e32 1, $vgpr6, implicit $exec
; GFX90A-NEXT: renamable $vgpr9 = COPY renamable $vgpr7, implicit $exec
; GFX90A-NEXT: renamable $vgpr10 = FLAT_LOAD_UBYTE renamable $vgpr8_vgpr9, 0, 0, implicit $exec, implicit $flat_scr :: (load (s8) from %ir.i86)
; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_MOV_B64 -1
; GFX90A-NEXT: renamable $vcc = V_CMP_EQ_U16_e64 0, killed $vgpr10, implicit $exec
- ; GFX90A-NEXT: renamable $sgpr62_sgpr63 = COPY renamable $sgpr36_sgpr37
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = COPY renamable $sgpr36_sgpr37
; GFX90A-NEXT: renamable $vgpr17 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr15 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr14 = IMPLICIT_DEF
@@ -845,68 +840,68 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr53 = IMPLICIT_DEF
; GFX90A-NEXT: renamable $vgpr13 = IMPLICIT_DEF implicit-def $vgpr12
; GFX90A-NEXT: renamable $vgpr11 = IMPLICIT_DEF implicit-def $vgpr10
- ; GFX90A-NEXT: $sgpr54_sgpr55 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
- ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.56, implicit $exec
+ ; GFX90A-NEXT: $sgpr56_sgpr57 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
+ ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.55, implicit $exec
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.60.Flow31:
- ; GFX90A-NEXT: successors: %bb.61(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr18, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: bb.59.Flow31:
+ ; GFX90A-NEXT: successors: %bb.60(0x80000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr18, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr56_sgpr57, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr54_sgpr55, implicit-def $scc
- ; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_MOV_B64 0
+ ; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr56_sgpr57, implicit-def $scc
+ ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_MOV_B64 0
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.61.Flow30:
- ; GFX90A-NEXT: successors: %bb.55(0x80000000)
- ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr18, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr46_sgpr47, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr60_sgpr61, $sgpr62_sgpr63, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
+ ; GFX90A-NEXT: bb.60.Flow30:
+ ; GFX90A-NEXT: successors: %bb.54(0x80000000)
+ ; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr18, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr58_sgpr59, $sgpr60_sgpr61, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x0000000000000003, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr56_sgpr57 = S_XOR_B64 $exec, -1, implicit-def dead $scc
- ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr46_sgpr47, $exec, implicit-def dead $scc
- ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr58_sgpr59 = S_AND_B64 killed renamable $sgpr58_sgpr59, $exec, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr52_sgpr53, $exec, implicit-def dead $scc
; GFX90A-NEXT: renamable $sgpr46_sgpr47 = S_ANDN2_B64 renamable $sgpr36_sgpr37, $exec, implicit-def dead $scc
- ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_AND_B64 killed renamable $sgpr62_sgpr63, $exec, implicit-def dead $scc
- ; GFX90A-NEXT: renamable $sgpr52_sgpr53 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr52_sgpr53, implicit-def dead $scc
- ; GFX90A-NEXT: S_BRANCH %bb.55
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_AND_B64 killed renamable $sgpr54_sgpr55, $exec, implicit-def dead $scc
+ ; GFX90A-NEXT: renamable $sgpr54_sgpr55 = S_OR_B64 killed renamable $sgpr46_sgpr47, killed renamable $sgpr54_sgpr55, implicit-def dead $scc
+ ; GFX90A-NEXT: S_BRANCH %bb.54
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.62.bb140:
- ; GFX90A-NEXT: successors: %bb.68(0x40000000), %bb.63(0x40000000)
+ ; GFX90A-NEXT: bb.61.bb140:
+ ; GFX90A-NEXT: successors: %bb.67(0x40000000), %bb.62(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr30_sgpr31, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 -1
; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr30_sgpr31, implicit-def dead $scc
- ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.68, implicit $vcc
+ ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.67, implicit $vcc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.63.Flow13:
- ; GFX90A-NEXT: successors: %bb.64(0x40000000), %bb.66(0x40000000)
+ ; GFX90A-NEXT: bb.62.Flow13:
+ ; GFX90A-NEXT: successors: %bb.63(0x40000000), %bb.65(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr36_sgpr37, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000C, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr36_sgpr37, implicit-def dead $scc
- ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.66, implicit $vcc
+ ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.65, implicit $vcc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.64.bb159:
- ; GFX90A-NEXT: successors: %bb.67(0x40000000), %bb.65(0x40000000)
+ ; GFX90A-NEXT: bb.63.bb159:
+ ; GFX90A-NEXT: successors: %bb.66(0x40000000), %bb.64(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000C, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vcc = V_CMP_NE_U32_e64 0, killed $vgpr30, implicit $exec
; GFX90A-NEXT: $sgpr12_sgpr13 = S_AND_SAVEEXEC_B64 $vcc, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_XOR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc
- ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.67, implicit $exec
+ ; GFX90A-NEXT: S_CBRANCH_EXECNZ %bb.66, implicit $exec
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.65.Flow10:
- ; GFX90A-NEXT: successors: %bb.66(0x80000000)
+ ; GFX90A-NEXT: bb.64.Flow10:
+ ; GFX90A-NEXT: successors: %bb.65(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $sgpr12_sgpr13 = S_ANDN2_SAVEEXEC_B64 $sgpr12_sgpr13, implicit-def $exec, implicit-def $scc, implicit $exec
; GFX90A-NEXT: $exec = S_OR_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def $scc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.66.Flow14:
+ ; GFX90A-NEXT: bb.65.Flow14:
; GFX90A-NEXT: successors: %bb.8(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr31, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr68_sgpr69 = COPY $exec
; GFX90A-NEXT: S_BRANCH %bb.8
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.67.bb161:
- ; GFX90A-NEXT: successors: %bb.65(0x80000000)
+ ; GFX90A-NEXT: bb.66.bb161:
+ ; GFX90A-NEXT: successors: %bb.64(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000C, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr21, killed $vgpr23, implicit $exec
@@ -922,10 +917,10 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr2 = V_CNDMASK_B32_e64 0, 0, 0, killed $vgpr2, killed $vcc, implicit $exec
; GFX90A-NEXT: renamable $vgpr2 = V_OR_B32_e32 killed $vgpr2, killed $vgpr15, implicit $exec
; GFX90A-NEXT: DS_WRITE2_B32_gfx9 killed renamable $vgpr3, killed renamable $vgpr2, renamable $vgpr3, 0, 1, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, align 4, addrspace 3)
- ; GFX90A-NEXT: S_BRANCH %bb.65
+ ; GFX90A-NEXT: S_BRANCH %bb.64
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.68.bb174:
- ; GFX90A-NEXT: successors: %bb.72(0x40000000), %bb.69(0x40000000)
+ ; GFX90A-NEXT: bb.67.bb174:
+ ; GFX90A-NEXT: successors: %bb.71(0x40000000), %bb.68(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr28_sgpr29, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000F, $vgpr12_vgpr13:0x000000000000000F, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000F, $vgpr20_vgpr21:0x000000000000000F, $vgpr22_vgpr23:0x000000000000000F, $vgpr24_vgpr25:0x000000000000000F, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr26 = V_OR_B32_e32 1, $vgpr24, implicit $exec
@@ -938,17 +933,17 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr50 = V_CNDMASK_B32_e64 0, 0, 0, $vgpr32, killed $sgpr12_sgpr13, implicit $exec
; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_MOV_B64 -1
; GFX90A-NEXT: renamable $vcc = S_AND_B64 $exec, killed renamable $sgpr28_sgpr29, implicit-def dead $scc
- ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.72, implicit $vcc
+ ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.71, implicit $vcc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.69.Flow:
- ; GFX90A-NEXT: successors: %bb.70(0x40000000), %bb.71(0x40000000)
+ ; GFX90A-NEXT: bb.68.Flow:
+ ; GFX90A-NEXT: successors: %bb.69(0x40000000), %bb.70(0x40000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr12_sgpr13, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000C, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: $vcc = S_ANDN2_B64 $exec, killed renamable $sgpr12_sgpr13, implicit-def dead $scc
- ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.71, implicit $vcc
+ ; GFX90A-NEXT: S_CBRANCH_VCCNZ %bb.70, implicit $vcc
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.70.bb186:
- ; GFX90A-NEXT: successors: %bb.71(0x80000000)
+ ; GFX90A-NEXT: bb.69.bb186:
+ ; GFX90A-NEXT: successors: %bb.70(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000C, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr2_vgpr3 = V_LSHLREV_B64_e64 3, killed $vgpr2_vgpr3, implicit $exec
@@ -976,15 +971,15 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr3, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 4, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null` + 4, basealign 8, addrspace 5)
; GFX90A-NEXT: BUFFER_STORE_DWORD_OFFSET killed renamable $vgpr2, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, implicit $exec :: (store (s32) into `ptr addrspace(5) null`, align 8, addrspace 5)
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.71.Flow9:
- ; GFX90A-NEXT: successors: %bb.63(0x80000000)
+ ; GFX90A-NEXT: bb.70.Flow9:
+ ; GFX90A-NEXT: successors: %bb.62(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $vgpr0_vgpr1:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000C, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $sgpr36_sgpr37 = S_MOV_B64 0
- ; GFX90A-NEXT: S_BRANCH %bb.63
+ ; GFX90A-NEXT: S_BRANCH %bb.62
; GFX90A-NEXT: {{ $}}
- ; GFX90A-NEXT: bb.72.bb196:
- ; GFX90A-NEXT: successors: %bb.69(0x80000000)
+ ; GFX90A-NEXT: bb.71.bb196:
+ ; GFX90A-NEXT: successors: %bb.68(0x80000000)
; GFX90A-NEXT: liveins: $sgpr14, $sgpr15, $sgpr16, $vgpr15, $vgpr17, $vgpr30, $vgpr31, $vgpr52, $vgpr53, $sgpr4_sgpr5, $sgpr6_sgpr7, $sgpr8_sgpr9:0x000000000000000F, $sgpr10_sgpr11, $sgpr18_sgpr19, $sgpr24_sgpr25, $sgpr34_sgpr35, $sgpr38_sgpr39, $sgpr40_sgpr41, $sgpr42_sgpr43, $sgpr44_sgpr45, $sgpr48_sgpr49, $sgpr50_sgpr51, $sgpr52_sgpr53, $sgpr54_sgpr55, $sgpr64_sgpr65, $sgpr66_sgpr67, $sgpr20_sgpr21_sgpr22_sgpr23:0x000000000000003C, $sgpr24_sgpr25_sgpr26_sgpr27:0x00000000000000F0, $vgpr0_vgpr1:0x000000000000000F, $vgpr2_vgpr3:0x000000000000000F, $vgpr4_vgpr5:0x000000000000000F, $vgpr6_vgpr7:0x000000000000000F, $vgpr8_vgpr9:0x000000000000000F, $vgpr10_vgpr11:0x000000000000000C, $vgpr12_vgpr13:0x000000000000000C, $vgpr14_vgpr15:0x0000000000000003, $vgpr16_vgpr17:0x0000000000000003, $vgpr18_vgpr19:0x000000000000000C, $vgpr20_vgpr21:0x000000000000000C, $vgpr22_vgpr23:0x000000000000000C, $vgpr24_vgpr25:0x000000000000000C, $vgpr26_vgpr27:0x0000000000000003, $vgpr28_vgpr29:0x0000000000000003, $vgpr32_vgpr33:0x0000000000000003, $vgpr34_vgpr35:0x0000000000000003, $vgpr36_vgpr37:0x0000000000000003, $vgpr38_vgpr39:0x0000000000000003, $vgpr40_vgpr41:0x000000000000000F, $vgpr42_vgpr43:0x000000000000000F, $vgpr44_vgpr45:0x000000000000000F, $vgpr46_vgpr47:0x000000000000000F, $vgpr48_vgpr49:0x0000000000000003, $vgpr50_vgpr51:0x0000000000000003, $vgpr56_vgpr57:0x000000000000000F, $vgpr58_vgpr59:0x000000000000000F, $vgpr60_vgpr61:0x000000000000000F, $vgpr62_vgpr63:0x000000000000000F, $sgpr0_sgpr1_sgpr2_sgpr3
; GFX90A-NEXT: {{ $}}
; GFX90A-NEXT: renamable $vgpr10 = V_OR_B32_e32 $vgpr50, killed $vgpr16, implicit $exec
@@ -992,7 +987,7 @@ define amdgpu_kernel void @f1(ptr addrspace(1) %arg, ptr addrspace(1) %arg1, i64
; GFX90A-NEXT: renamable $vgpr55 = AV_MOV_B32_IMM_PSEUDO 0, implicit $exec
; GFX90A-NEXT: DS_WRITE_B64_gfx9 killed renamable $vgpr55, renamable $vgpr54_vgpr55, 0, 0, implicit $exec :: (store (s64) into `ptr addrspace(3) null`, addrspace 3)
; GFX90A-NEXT: renamable $sgpr12_sgpr13 = S_MOV_B64 0
- ; GFX90A-NEXT: S_BRANCH %bb.69
+ ; GFX90A-NEXT: S_BRANCH %bb.68
bb:
%i = tail call i32 @llvm.amdgcn.workitem.id.x()
%i11 = icmp eq i32 %i, 0
diff --git a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
index 5959f76..d5a0593 100644
--- a/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
+++ b/llvm/test/CodeGen/AMDGPU/branch-relaxation.ll
@@ -1265,54 +1265,82 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32
; GCN-LABEL: long_branch_hang:
; GCN: ; %bb.0: ; %bb
; GCN-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
+; GCN-NEXT: s_mov_b64 s[10:11], -1
+; GCN-NEXT: s_mov_b64 s[8:9], 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: s_cmp_eq_u32 s0, 0
; GCN-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN-NEXT: s_cmp_lg_u32 s0, 0
-; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GCN-NEXT: s_cselect_b64 s[12:13], -1, 0
; GCN-NEXT: s_cmp_lt_i32 s3, 6
-; GCN-NEXT: s_cbranch_scc1 .LBB10_1
-; GCN-NEXT: ; %bb.8: ; %bb
+; GCN-NEXT: s_cbranch_scc0 .LBB10_1
+; GCN-NEXT: ; %bb.10: ; %bb
; GCN-NEXT: s_getpc_b64 s[8:9]
-; GCN-NEXT: .Lpost_getpc12:
-; GCN-NEXT: s_add_u32 s8, s8, (.LBB10_2-.Lpost_getpc12)&4294967295
-; GCN-NEXT: s_addc_u32 s9, s9, (.LBB10_2-.Lpost_getpc12)>>32
+; GCN-NEXT: .Lpost_getpc13:
+; GCN-NEXT: s_add_u32 s8, s8, (.LBB10_4-.Lpost_getpc13)&4294967295
+; GCN-NEXT: s_addc_u32 s9, s9, (.LBB10_4-.Lpost_getpc13)>>32
+; GCN-NEXT: s_setpc_b64 s[8:9]
+; GCN-NEXT: .LBB10_1: ; %Flow
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; GCN-NEXT: s_cbranch_vccnz .LBB10_2
+; GCN-NEXT: ; %bb.12: ; %Flow
+; GCN-NEXT: s_getpc_b64 s[8:9]
+; GCN-NEXT: .Lpost_getpc14:
+; GCN-NEXT: s_add_u32 s8, s8, (.LBB10_5-.Lpost_getpc14)&4294967295
+; GCN-NEXT: s_addc_u32 s9, s9, (.LBB10_5-.Lpost_getpc14)>>32
; GCN-NEXT: s_setpc_b64 s[8:9]
-; GCN-NEXT: .LBB10_1: ; %bb13
+; GCN-NEXT: .LBB10_2: ; %Flow5
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GCN-NEXT: s_cbranch_vccz .LBB10_3
+; GCN-NEXT: ; %bb.14: ; %Flow5
+; GCN-NEXT: s_getpc_b64 s[0:1]
+; GCN-NEXT: .Lpost_getpc15:
+; GCN-NEXT: s_add_u32 s0, s0, (.LBB10_6-.Lpost_getpc15)&4294967295
+; GCN-NEXT: s_addc_u32 s1, s1, (.LBB10_6-.Lpost_getpc15)>>32
+; GCN-NEXT: s_setpc_b64 s[0:1]
+; GCN-NEXT: .LBB10_3: ; %bb14
+; GCN-NEXT: s_cmp_lt_i32 s1, 9
+; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GCN-NEXT: s_cmp_lt_i32 s2, s3
+; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GCN-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GCN-NEXT: s_and_b64 s[0:1], s[6:7], s[0:1]
+; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GCN-NEXT: ; %bb.8: ; %bb14
+; GCN-NEXT: s_getpc_b64 s[0:1]
+; GCN-NEXT: .Lpost_getpc12:
+; GCN-NEXT: s_add_u32 s0, s0, (.LBB10_7-.Lpost_getpc12)&4294967295
+; GCN-NEXT: s_addc_u32 s1, s1, (.LBB10_7-.Lpost_getpc12)>>32
+; GCN-NEXT: s_setpc_b64 s[0:1]
+; GCN-NEXT: .LBB10_4: ; %bb13
; GCN-NEXT: ;;#ASMSTART
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: v_nop_e64
; GCN-NEXT: ;;#ASMEND
-; GCN-NEXT: s_cbranch_execz .LBB10_3
-; GCN-NEXT: s_branch .LBB10_4
-; GCN-NEXT: .LBB10_2:
-; GCN-NEXT: s_mov_b64 s[8:9], 0
-; GCN-NEXT: .LBB10_3: ; %bb9
+; GCN-NEXT: s_mov_b64 s[8:9], s[12:13]
+; GCN-NEXT: s_cbranch_execz .LBB10_5
+; GCN-NEXT: ; %bb.16: ; %bb13
+; GCN-NEXT: s_getpc_b64 s[10:11]
+; GCN-NEXT: .Lpost_getpc16:
+; GCN-NEXT: s_add_u32 s10, s10, (.LBB10_2-.Lpost_getpc16)&4294967295
+; GCN-NEXT: s_addc_u32 s11, s11, (.LBB10_2-.Lpost_getpc16)>>32
+; GCN-NEXT: s_setpc_b64 s[10:11]
+; GCN-NEXT: .LBB10_5: ; %bb9
; GCN-NEXT: s_cmp_lt_i32 s3, 11
; GCN-NEXT: s_cselect_b64 s[8:9], -1, 0
; GCN-NEXT: s_cmp_ge_i32 s2, s3
; GCN-NEXT: s_cselect_b64 s[10:11], -1, 0
; GCN-NEXT: s_and_b64 s[8:9], s[10:11], s[8:9]
-; GCN-NEXT: .LBB10_4: ; %Flow5
; GCN-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; GCN-NEXT: s_cbranch_vccz .LBB10_5
-; GCN-NEXT: ; %bb.10: ; %Flow5
-; GCN-NEXT: s_getpc_b64 s[0:1]
-; GCN-NEXT: .Lpost_getpc13:
-; GCN-NEXT: s_add_u32 s0, s0, (.LBB10_6-.Lpost_getpc13)&4294967295
-; GCN-NEXT: s_addc_u32 s1, s1, (.LBB10_6-.Lpost_getpc13)>>32
-; GCN-NEXT: s_setpc_b64 s[0:1]
-; GCN-NEXT: .LBB10_5: ; %bb14
-; GCN-NEXT: s_cmp_lt_i32 s1, 9
-; GCN-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GCN-NEXT: s_cmp_lt_i32 s2, s3
-; GCN-NEXT: s_cselect_b64 s[2:3], -1, 0
-; GCN-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GCN-NEXT: s_and_b64 s[0:1], s[6:7], s[0:1]
-; GCN-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; GCN-NEXT: s_branch .LBB10_7
+; GCN-NEXT: s_cbranch_vccnz .LBB10_6
+; GCN-NEXT: ; %bb.18: ; %bb9
+; GCN-NEXT: s_getpc_b64 s[8:9]
+; GCN-NEXT: .Lpost_getpc17:
+; GCN-NEXT: s_add_u32 s8, s8, (.LBB10_3-.Lpost_getpc17)&4294967295
+; GCN-NEXT: s_addc_u32 s9, s9, (.LBB10_3-.Lpost_getpc17)>>32
+; GCN-NEXT: s_setpc_b64 s[8:9]
; GCN-NEXT: .LBB10_6:
; GCN-NEXT: ; implicit-def: $vgpr0
; GCN-NEXT: .LBB10_7: ; %bb19
@@ -1330,54 +1358,97 @@ define amdgpu_kernel void @long_branch_hang(ptr addrspace(1) nocapture %arg, i32
; GFX11-LABEL: long_branch_hang:
; GFX11: ; %bb.0: ; %bb
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x2c
+; GFX11-NEXT: s_mov_b64 s[10:11], -1
+; GFX11-NEXT: s_mov_b64 s[8:9], 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_eq_u32 s0, 0
; GFX11-NEXT: s_cselect_b64 s[6:7], -1, 0
; GFX11-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-NEXT: s_cselect_b64 s[8:9], -1, 0
+; GFX11-NEXT: s_cselect_b64 s[12:13], -1, 0
; GFX11-NEXT: s_cmp_lt_i32 s3, 6
-; GFX11-NEXT: s_cbranch_scc1 .LBB10_1
-; GFX11-NEXT: ; %bb.8: ; %bb
+; GFX11-NEXT: s_cbranch_scc0 .LBB10_1
+; GFX11-NEXT: ; %bb.18: ; %bb
; GFX11-NEXT: s_getpc_b64 s[8:9]
-; GFX11-NEXT: .Lpost_getpc11:
+; GFX11-NEXT: .Lpost_getpc16:
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_add_u32 s8, s8, (.LBB10_4-.Lpost_getpc16)&4294967295
+; GFX11-NEXT: s_addc_u32 s9, s9, (.LBB10_4-.Lpost_getpc16)>>32
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_setpc_b64 s[8:9]
+; GFX11-NEXT: .LBB10_1: ; %Flow
+; GFX11-NEXT: s_and_not1_b64 vcc, exec, s[10:11]
+; GFX11-NEXT: s_cbranch_vccnz .LBB10_2
+; GFX11-NEXT: ; %bb.10: ; %Flow
+; GFX11-NEXT: s_getpc_b64 s[8:9]
+; GFX11-NEXT: .Lpost_getpc12:
; GFX11-NEXT: s_waitcnt_depctr 0xfffe
-; GFX11-NEXT: s_add_u32 s8, s8, (.LBB10_2-.Lpost_getpc11)&4294967295
-; GFX11-NEXT: s_addc_u32 s9, s9, (.LBB10_2-.Lpost_getpc11)>>32
+; GFX11-NEXT: s_add_u32 s8, s8, (.LBB10_5-.Lpost_getpc12)&4294967295
+; GFX11-NEXT: s_addc_u32 s9, s9, (.LBB10_5-.Lpost_getpc12)>>32
; GFX11-NEXT: s_waitcnt_depctr 0xfffe
; GFX11-NEXT: s_setpc_b64 s[8:9]
-; GFX11-NEXT: .LBB10_1: ; %bb13
+; GFX11-NEXT: .LBB10_2: ; %Flow5
+; GFX11-NEXT: s_and_not1_b64 vcc, exec, s[8:9]
+; GFX11-NEXT: s_cbranch_vccz .LBB10_3
+; GFX11-NEXT: ; %bb.12: ; %Flow5
+; GFX11-NEXT: s_getpc_b64 s[0:1]
+; GFX11-NEXT: .Lpost_getpc13:
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_add_u32 s0, s0, (.LBB10_6-.Lpost_getpc13)&4294967295
+; GFX11-NEXT: s_addc_u32 s1, s1, (.LBB10_6-.Lpost_getpc13)>>32
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_setpc_b64 s[0:1]
+; GFX11-NEXT: .LBB10_3: ; %bb14
+; GFX11-NEXT: s_cmp_lt_i32 s1, 9
+; GFX11-NEXT: s_cselect_b64 s[0:1], -1, 0
+; GFX11-NEXT: s_cmp_lt_i32 s2, s3
+; GFX11-NEXT: s_cselect_b64 s[2:3], -1, 0
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
+; GFX11-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
+; GFX11-NEXT: s_and_b64 s[0:1], s[6:7], s[0:1]
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
+; GFX11-NEXT: ; %bb.8: ; %bb14
+; GFX11-NEXT: s_getpc_b64 s[0:1]
+; GFX11-NEXT: .Lpost_getpc11:
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_add_u32 s0, s0, (.LBB10_7-.Lpost_getpc11)&4294967295
+; GFX11-NEXT: s_addc_u32 s1, s1, (.LBB10_7-.Lpost_getpc11)>>32
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_setpc_b64 s[0:1]
+; GFX11-NEXT: .LBB10_4: ; %bb13
+; GFX11-NEXT: s_mov_b64 s[8:9], s[12:13]
; GFX11-NEXT: ;;#ASMSTART
; GFX11-NEXT: v_nop_e64
; GFX11-NEXT: v_nop_e64
; GFX11-NEXT: v_nop_e64
; GFX11-NEXT: v_nop_e64
; GFX11-NEXT: ;;#ASMEND
-; GFX11-NEXT: s_cbranch_execz .LBB10_3
-; GFX11-NEXT: s_branch .LBB10_4
-; GFX11-NEXT: .LBB10_2:
-; GFX11-NEXT: s_mov_b64 s[8:9], 0
-; GFX11-NEXT: .LBB10_3: ; %bb9
+; GFX11-NEXT: s_cbranch_execz .LBB10_5
+; GFX11-NEXT: ; %bb.14: ; %bb13
+; GFX11-NEXT: s_getpc_b64 s[10:11]
+; GFX11-NEXT: .Lpost_getpc14:
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_add_u32 s10, s10, (.LBB10_2-.Lpost_getpc14)&4294967295
+; GFX11-NEXT: s_addc_u32 s11, s11, (.LBB10_2-.Lpost_getpc14)>>32
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_setpc_b64 s[10:11]
+; GFX11-NEXT: .LBB10_5: ; %bb9
; GFX11-NEXT: s_cmp_lt_i32 s3, 11
; GFX11-NEXT: s_cselect_b64 s[8:9], -1, 0
; GFX11-NEXT: s_cmp_ge_i32 s2, s3
; GFX11-NEXT: s_cselect_b64 s[10:11], -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX11-NEXT: s_and_b64 s[8:9], s[10:11], s[8:9]
-; GFX11-NEXT: .LBB10_4: ; %Flow5
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX11-NEXT: s_and_not1_b64 vcc, exec, s[8:9]
; GFX11-NEXT: s_cbranch_vccnz .LBB10_6
-; GFX11-NEXT: ; %bb.5: ; %bb14
-; GFX11-NEXT: s_cmp_lt_i32 s1, 9
-; GFX11-NEXT: s_cselect_b64 s[0:1], -1, 0
-; GFX11-NEXT: s_cmp_lt_i32 s2, s3
-; GFX11-NEXT: s_cselect_b64 s[2:3], -1, 0
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
-; GFX11-NEXT: s_or_b64 s[0:1], s[2:3], s[0:1]
-; GFX11-NEXT: s_and_b64 s[0:1], s[6:7], s[0:1]
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, 1, s[0:1]
-; GFX11-NEXT: s_branch .LBB10_7
+; GFX11-NEXT: ; %bb.16: ; %bb9
+; GFX11-NEXT: s_getpc_b64 s[8:9]
+; GFX11-NEXT: .Lpost_getpc15:
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_add_u32 s8, s8, (.LBB10_3-.Lpost_getpc15)&4294967295
+; GFX11-NEXT: s_addc_u32 s9, s9, (.LBB10_3-.Lpost_getpc15)>>32
+; GFX11-NEXT: s_waitcnt_depctr 0xfffe
+; GFX11-NEXT: s_setpc_b64 s[8:9]
; GFX11-NEXT: .LBB10_6:
; GFX11-NEXT: ; implicit-def: $vgpr0
; GFX11-NEXT: .LBB10_7: ; %bb19
diff --git a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
index b71885b..a71bc50 100644
--- a/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
+++ b/llvm/test/CodeGen/AMDGPU/carryout-selection.ll
@@ -1983,6 +1983,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; CISI: ; %bb.0:
; CISI-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x9
; CISI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0xd
+; CISI-NEXT: s_mov_b64 s[4:5], -1
; CISI-NEXT: s_waitcnt lgkmcnt(0)
; CISI-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
; CISI-NEXT: s_mov_b32 s0, 0
@@ -2128,12 +2129,15 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; CISI-NEXT: s_endpgm
; CISI-NEXT: .LBB16_4:
; CISI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CISI-NEXT: s_branch .LBB16_2
+; CISI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; CISI-NEXT: s_cbranch_vccz .LBB16_2
+; CISI-NEXT: s_branch .LBB16_3
;
; VI-LABEL: sudiv64:
; VI: ; %bb.0:
; VI-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; VI-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
; VI-NEXT: s_mov_b32 s0, 0
@@ -2285,12 +2289,15 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; VI-NEXT: s_endpgm
; VI-NEXT: .LBB16_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB16_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB16_2
+; VI-NEXT: s_branch .LBB16_3
;
; GFX9-LABEL: sudiv64:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX9-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
; GFX9-NEXT: s_mov_b32 s0, 0
@@ -2452,7 +2459,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX9-NEXT: s_endpgm
; GFX9-NEXT: .LBB16_4:
; GFX9-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX9-NEXT: s_branch .LBB16_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB16_2
+; GFX9-NEXT: s_branch .LBB16_3
;
; GFX1010-LABEL: sudiv64:
; GFX1010: ; %bb.0:
@@ -2460,15 +2469,16 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1010-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX1010-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34
; GFX1010-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1010-NEXT: s_or_b64 s[4:5], s[10:11], s[2:3]
-; GFX1010-NEXT: s_mov_b32 s4, 0
-; GFX1010-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX1010-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
+; GFX1010-NEXT: s_mov_b32 s0, 0
+; GFX1010-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1010-NEXT: s_mov_b32 s0, -1
; GFX1010-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1010-NEXT: ; %bb.1:
; GFX1010-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX1010-NEXT: v_cvt_f32_u32_e32 v1, s3
-; GFX1010-NEXT: s_sub_u32 s5, 0, s2
-; GFX1010-NEXT: s_subb_u32 s6, 0, s3
+; GFX1010-NEXT: s_sub_u32 s4, 0, s2
+; GFX1010-NEXT: s_subb_u32 s5, 0, s3
; GFX1010-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GFX1010-NEXT: v_rcp_f32_e32 v0, v0
; GFX1010-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2479,111 +2489,110 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1010-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX1010-NEXT: v_readfirstlane_b32 s0, v1
; GFX1010-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1010-NEXT: s_mul_i32 s7, s5, s0
-; GFX1010-NEXT: s_mul_hi_u32 s13, s5, s1
-; GFX1010-NEXT: s_mul_i32 s12, s6, s1
-; GFX1010-NEXT: s_add_i32 s7, s13, s7
-; GFX1010-NEXT: s_mul_i32 s14, s5, s1
-; GFX1010-NEXT: s_add_i32 s7, s7, s12
-; GFX1010-NEXT: s_mul_hi_u32 s13, s1, s14
-; GFX1010-NEXT: s_mul_hi_u32 s15, s0, s14
-; GFX1010-NEXT: s_mul_i32 s12, s0, s14
-; GFX1010-NEXT: s_mul_hi_u32 s14, s1, s7
-; GFX1010-NEXT: s_mul_i32 s1, s1, s7
-; GFX1010-NEXT: s_mul_hi_u32 s16, s0, s7
-; GFX1010-NEXT: s_add_u32 s1, s13, s1
-; GFX1010-NEXT: s_addc_u32 s13, 0, s14
-; GFX1010-NEXT: s_add_u32 s1, s1, s12
-; GFX1010-NEXT: s_mul_i32 s7, s0, s7
-; GFX1010-NEXT: s_addc_u32 s1, s13, s15
-; GFX1010-NEXT: s_addc_u32 s12, s16, 0
+; GFX1010-NEXT: s_mul_i32 s6, s4, s0
+; GFX1010-NEXT: s_mul_hi_u32 s12, s4, s1
+; GFX1010-NEXT: s_mul_i32 s7, s5, s1
+; GFX1010-NEXT: s_add_i32 s6, s12, s6
+; GFX1010-NEXT: s_mul_i32 s13, s4, s1
+; GFX1010-NEXT: s_add_i32 s6, s6, s7
+; GFX1010-NEXT: s_mul_hi_u32 s12, s1, s13
+; GFX1010-NEXT: s_mul_hi_u32 s14, s0, s13
+; GFX1010-NEXT: s_mul_i32 s7, s0, s13
+; GFX1010-NEXT: s_mul_hi_u32 s13, s1, s6
+; GFX1010-NEXT: s_mul_i32 s1, s1, s6
+; GFX1010-NEXT: s_mul_hi_u32 s15, s0, s6
+; GFX1010-NEXT: s_add_u32 s1, s12, s1
+; GFX1010-NEXT: s_addc_u32 s12, 0, s13
; GFX1010-NEXT: s_add_u32 s1, s1, s7
-; GFX1010-NEXT: s_addc_u32 s7, 0, s12
+; GFX1010-NEXT: s_mul_i32 s6, s0, s6
+; GFX1010-NEXT: s_addc_u32 s1, s12, s14
+; GFX1010-NEXT: s_addc_u32 s7, s15, 0
+; GFX1010-NEXT: s_add_u32 s1, s1, s6
+; GFX1010-NEXT: s_addc_u32 s6, 0, s7
; GFX1010-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
-; GFX1010-NEXT: s_addc_u32 s0, s0, s7
+; GFX1010-NEXT: s_addc_u32 s0, s0, s6
; GFX1010-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1010-NEXT: s_mul_i32 s7, s5, s0
-; GFX1010-NEXT: s_mul_hi_u32 s12, s5, s1
-; GFX1010-NEXT: s_mul_i32 s6, s6, s1
-; GFX1010-NEXT: s_add_i32 s7, s12, s7
+; GFX1010-NEXT: s_mul_i32 s6, s4, s0
+; GFX1010-NEXT: s_mul_hi_u32 s7, s4, s1
; GFX1010-NEXT: s_mul_i32 s5, s5, s1
-; GFX1010-NEXT: s_add_i32 s7, s7, s6
-; GFX1010-NEXT: s_mul_hi_u32 s12, s0, s5
-; GFX1010-NEXT: s_mul_i32 s13, s0, s5
-; GFX1010-NEXT: s_mul_hi_u32 s5, s1, s5
-; GFX1010-NEXT: s_mul_hi_u32 s14, s1, s7
-; GFX1010-NEXT: s_mul_i32 s1, s1, s7
-; GFX1010-NEXT: s_mul_hi_u32 s6, s0, s7
-; GFX1010-NEXT: s_add_u32 s1, s5, s1
-; GFX1010-NEXT: s_addc_u32 s5, 0, s14
-; GFX1010-NEXT: s_add_u32 s1, s1, s13
-; GFX1010-NEXT: s_mul_i32 s7, s0, s7
-; GFX1010-NEXT: s_addc_u32 s1, s5, s12
-; GFX1010-NEXT: s_addc_u32 s5, s6, 0
-; GFX1010-NEXT: s_add_u32 s1, s1, s7
-; GFX1010-NEXT: s_addc_u32 s5, 0, s5
+; GFX1010-NEXT: s_add_i32 s6, s7, s6
+; GFX1010-NEXT: s_mul_i32 s4, s4, s1
+; GFX1010-NEXT: s_add_i32 s6, s6, s5
+; GFX1010-NEXT: s_mul_hi_u32 s7, s0, s4
+; GFX1010-NEXT: s_mul_i32 s12, s0, s4
+; GFX1010-NEXT: s_mul_hi_u32 s4, s1, s4
+; GFX1010-NEXT: s_mul_hi_u32 s13, s1, s6
+; GFX1010-NEXT: s_mul_i32 s1, s1, s6
+; GFX1010-NEXT: s_mul_hi_u32 s5, s0, s6
+; GFX1010-NEXT: s_add_u32 s1, s4, s1
+; GFX1010-NEXT: s_addc_u32 s4, 0, s13
+; GFX1010-NEXT: s_add_u32 s1, s1, s12
+; GFX1010-NEXT: s_mul_i32 s6, s0, s6
+; GFX1010-NEXT: s_addc_u32 s1, s4, s7
+; GFX1010-NEXT: s_addc_u32 s4, s5, 0
+; GFX1010-NEXT: s_add_u32 s1, s1, s6
+; GFX1010-NEXT: s_addc_u32 s4, 0, s4
; GFX1010-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX1010-NEXT: s_cmp_lg_u32 s1, 0
-; GFX1010-NEXT: s_addc_u32 s0, s0, s5
+; GFX1010-NEXT: s_addc_u32 s0, s0, s4
; GFX1010-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1010-NEXT: s_mul_i32 s6, s10, s0
-; GFX1010-NEXT: s_mul_hi_u32 s5, s10, s0
-; GFX1010-NEXT: s_mul_hi_u32 s7, s11, s0
+; GFX1010-NEXT: s_mul_i32 s5, s10, s0
+; GFX1010-NEXT: s_mul_hi_u32 s4, s10, s0
+; GFX1010-NEXT: s_mul_hi_u32 s6, s11, s0
; GFX1010-NEXT: s_mul_i32 s0, s11, s0
-; GFX1010-NEXT: s_mul_hi_u32 s12, s10, s1
-; GFX1010-NEXT: s_mul_hi_u32 s13, s11, s1
+; GFX1010-NEXT: s_mul_hi_u32 s7, s10, s1
+; GFX1010-NEXT: s_mul_hi_u32 s12, s11, s1
; GFX1010-NEXT: s_mul_i32 s1, s11, s1
-; GFX1010-NEXT: s_add_u32 s6, s12, s6
-; GFX1010-NEXT: s_addc_u32 s5, 0, s5
-; GFX1010-NEXT: s_add_u32 s1, s6, s1
-; GFX1010-NEXT: s_addc_u32 s1, s5, s13
-; GFX1010-NEXT: s_addc_u32 s5, s7, 0
+; GFX1010-NEXT: s_add_u32 s5, s7, s5
+; GFX1010-NEXT: s_addc_u32 s4, 0, s4
+; GFX1010-NEXT: s_add_u32 s1, s5, s1
+; GFX1010-NEXT: s_addc_u32 s1, s4, s12
+; GFX1010-NEXT: s_addc_u32 s4, s6, 0
; GFX1010-NEXT: s_add_u32 s1, s1, s0
-; GFX1010-NEXT: s_addc_u32 s5, 0, s5
+; GFX1010-NEXT: s_addc_u32 s4, 0, s4
; GFX1010-NEXT: s_mul_hi_u32 s0, s2, s1
-; GFX1010-NEXT: s_mul_i32 s7, s2, s5
-; GFX1010-NEXT: s_mul_i32 s12, s2, s1
-; GFX1010-NEXT: s_add_i32 s0, s0, s7
-; GFX1010-NEXT: v_sub_co_u32 v0, s7, s10, s12
-; GFX1010-NEXT: s_mul_i32 s6, s3, s1
+; GFX1010-NEXT: s_mul_i32 s6, s2, s4
+; GFX1010-NEXT: s_mul_i32 s7, s2, s1
; GFX1010-NEXT: s_add_i32 s0, s0, s6
-; GFX1010-NEXT: v_sub_co_u32 v1, s12, v0, s2
-; GFX1010-NEXT: s_sub_i32 s6, s11, s0
+; GFX1010-NEXT: v_sub_co_u32 v0, s6, s10, s7
+; GFX1010-NEXT: s_mul_i32 s5, s3, s1
+; GFX1010-NEXT: s_add_i32 s0, s0, s5
+; GFX1010-NEXT: v_sub_co_u32 v1, s7, v0, s2
+; GFX1010-NEXT: s_sub_i32 s5, s11, s0
+; GFX1010-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1010-NEXT: s_subb_u32 s5, s5, s3
; GFX1010-NEXT: s_cmp_lg_u32 s7, 0
-; GFX1010-NEXT: s_subb_u32 s6, s6, s3
-; GFX1010-NEXT: s_cmp_lg_u32 s12, 0
; GFX1010-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v1
-; GFX1010-NEXT: s_subb_u32 s6, s6, 0
-; GFX1010-NEXT: s_cmp_ge_u32 s6, s3
+; GFX1010-NEXT: s_subb_u32 s5, s5, 0
+; GFX1010-NEXT: s_cmp_ge_u32 s5, s3
; GFX1010-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX1010-NEXT: s_cselect_b32 s12, -1, 0
-; GFX1010-NEXT: s_cmp_eq_u32 s6, s3
+; GFX1010-NEXT: s_cselect_b32 s7, -1, 0
+; GFX1010-NEXT: s_cmp_eq_u32 s5, s3
; GFX1010-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GFX1010-NEXT: s_add_u32 s6, s1, 1
-; GFX1010-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1010-NEXT: s_addc_u32 s12, s5, 0
-; GFX1010-NEXT: s_add_u32 s13, s1, 2
-; GFX1010-NEXT: s_addc_u32 s14, s5, 0
-; GFX1010-NEXT: s_cmp_lg_u32 s7, 0
+; GFX1010-NEXT: s_add_u32 s5, s1, 1
+; GFX1010-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc_lo
+; GFX1010-NEXT: s_addc_u32 s7, s4, 0
+; GFX1010-NEXT: s_add_u32 s12, s1, 2
+; GFX1010-NEXT: s_addc_u32 s13, s4, 0
+; GFX1010-NEXT: s_cmp_lg_u32 s6, 0
; GFX1010-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v0
; GFX1010-NEXT: s_subb_u32 s0, s11, s0
-; GFX1010-NEXT: v_mov_b32_e32 v2, s13
+; GFX1010-NEXT: v_mov_b32_e32 v2, s12
; GFX1010-NEXT: s_cmp_ge_u32 s0, s3
; GFX1010-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1010-NEXT: s_cselect_b32 s7, -1, 0
+; GFX1010-NEXT: s_cselect_b32 s6, -1, 0
; GFX1010-NEXT: s_cmp_eq_u32 s0, s3
; GFX1010-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX1010-NEXT: s_cselect_b32 s0, -1, 0
-; GFX1010-NEXT: v_mov_b32_e32 v1, s14
-; GFX1010-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX1010-NEXT: v_cndmask_b32_e32 v2, s6, v2, vcc_lo
-; GFX1010-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
+; GFX1010-NEXT: v_mov_b32_e32 v1, s13
+; GFX1010-NEXT: v_cndmask_b32_e64 v0, s6, v0, s0
+; GFX1010-NEXT: v_cndmask_b32_e32 v2, s5, v2, vcc_lo
+; GFX1010-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc_lo
; GFX1010-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1010-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc_lo
+; GFX1010-NEXT: v_cndmask_b32_e32 v1, s4, v1, vcc_lo
; GFX1010-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX1010-NEXT: s_andn2_b32 vcc_lo, exec_lo, s4
-; GFX1010-NEXT: s_cbranch_vccnz .LBB16_3
+; GFX1010-NEXT: s_cbranch_execnz .LBB16_3
; GFX1010-NEXT: .LBB16_2:
; GFX1010-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX1010-NEXT: s_sub_i32 s1, 0, s2
@@ -2614,7 +2623,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1010-NEXT: s_endpgm
; GFX1010-NEXT: .LBB16_4:
; GFX1010-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1010-NEXT: s_branch .LBB16_2
+; GFX1010-NEXT: s_andn2_b32 vcc_lo, exec_lo, s0
+; GFX1010-NEXT: s_cbranch_vccz .LBB16_2
+; GFX1010-NEXT: s_branch .LBB16_3
;
; GFX1030W32-LABEL: sudiv64:
; GFX1030W32: ; %bb.0:
@@ -2622,15 +2633,16 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W32-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GFX1030W32-NEXT: s_load_dwordx2 s[2:3], s[4:5], 0x34
; GFX1030W32-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1030W32-NEXT: s_or_b64 s[4:5], s[10:11], s[2:3]
-; GFX1030W32-NEXT: s_mov_b32 s4, 0
-; GFX1030W32-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX1030W32-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
+; GFX1030W32-NEXT: s_mov_b32 s0, 0
+; GFX1030W32-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1030W32-NEXT: s_mov_b32 s0, -1
; GFX1030W32-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1030W32-NEXT: ; %bb.1:
; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v1, s3
-; GFX1030W32-NEXT: s_sub_u32 s5, 0, s2
-; GFX1030W32-NEXT: s_subb_u32 s6, 0, s3
+; GFX1030W32-NEXT: s_sub_u32 s4, 0, s2
+; GFX1030W32-NEXT: s_subb_u32 s5, 0, s3
; GFX1030W32-NEXT: v_fmamk_f32 v0, v1, 0x4f800000, v0
; GFX1030W32-NEXT: v_rcp_f32_e32 v0, v0
; GFX1030W32-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -2641,111 +2653,110 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W32-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX1030W32-NEXT: v_readfirstlane_b32 s0, v1
; GFX1030W32-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1030W32-NEXT: s_mul_i32 s7, s5, s0
-; GFX1030W32-NEXT: s_mul_hi_u32 s13, s5, s1
-; GFX1030W32-NEXT: s_mul_i32 s12, s6, s1
-; GFX1030W32-NEXT: s_add_i32 s7, s13, s7
-; GFX1030W32-NEXT: s_mul_i32 s14, s5, s1
-; GFX1030W32-NEXT: s_add_i32 s7, s7, s12
-; GFX1030W32-NEXT: s_mul_hi_u32 s13, s1, s14
-; GFX1030W32-NEXT: s_mul_hi_u32 s15, s0, s14
-; GFX1030W32-NEXT: s_mul_i32 s12, s0, s14
-; GFX1030W32-NEXT: s_mul_hi_u32 s14, s1, s7
-; GFX1030W32-NEXT: s_mul_i32 s1, s1, s7
-; GFX1030W32-NEXT: s_mul_hi_u32 s16, s0, s7
-; GFX1030W32-NEXT: s_add_u32 s1, s13, s1
-; GFX1030W32-NEXT: s_addc_u32 s13, 0, s14
-; GFX1030W32-NEXT: s_add_u32 s1, s1, s12
-; GFX1030W32-NEXT: s_mul_i32 s7, s0, s7
-; GFX1030W32-NEXT: s_addc_u32 s1, s13, s15
-; GFX1030W32-NEXT: s_addc_u32 s12, s16, 0
+; GFX1030W32-NEXT: s_mul_i32 s6, s4, s0
+; GFX1030W32-NEXT: s_mul_hi_u32 s12, s4, s1
+; GFX1030W32-NEXT: s_mul_i32 s7, s5, s1
+; GFX1030W32-NEXT: s_add_i32 s6, s12, s6
+; GFX1030W32-NEXT: s_mul_i32 s13, s4, s1
+; GFX1030W32-NEXT: s_add_i32 s6, s6, s7
+; GFX1030W32-NEXT: s_mul_hi_u32 s12, s1, s13
+; GFX1030W32-NEXT: s_mul_hi_u32 s14, s0, s13
+; GFX1030W32-NEXT: s_mul_i32 s7, s0, s13
+; GFX1030W32-NEXT: s_mul_hi_u32 s13, s1, s6
+; GFX1030W32-NEXT: s_mul_i32 s1, s1, s6
+; GFX1030W32-NEXT: s_mul_hi_u32 s15, s0, s6
+; GFX1030W32-NEXT: s_add_u32 s1, s12, s1
+; GFX1030W32-NEXT: s_addc_u32 s12, 0, s13
; GFX1030W32-NEXT: s_add_u32 s1, s1, s7
-; GFX1030W32-NEXT: s_addc_u32 s7, 0, s12
+; GFX1030W32-NEXT: s_mul_i32 s6, s0, s6
+; GFX1030W32-NEXT: s_addc_u32 s1, s12, s14
+; GFX1030W32-NEXT: s_addc_u32 s7, s15, 0
+; GFX1030W32-NEXT: s_add_u32 s1, s1, s6
+; GFX1030W32-NEXT: s_addc_u32 s6, 0, s7
; GFX1030W32-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX1030W32-NEXT: s_cmp_lg_u32 s1, 0
-; GFX1030W32-NEXT: s_addc_u32 s0, s0, s7
+; GFX1030W32-NEXT: s_addc_u32 s0, s0, s6
; GFX1030W32-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1030W32-NEXT: s_mul_i32 s7, s5, s0
-; GFX1030W32-NEXT: s_mul_hi_u32 s12, s5, s1
-; GFX1030W32-NEXT: s_mul_i32 s6, s6, s1
-; GFX1030W32-NEXT: s_add_i32 s7, s12, s7
+; GFX1030W32-NEXT: s_mul_i32 s6, s4, s0
+; GFX1030W32-NEXT: s_mul_hi_u32 s7, s4, s1
; GFX1030W32-NEXT: s_mul_i32 s5, s5, s1
-; GFX1030W32-NEXT: s_add_i32 s7, s7, s6
-; GFX1030W32-NEXT: s_mul_hi_u32 s12, s0, s5
-; GFX1030W32-NEXT: s_mul_i32 s13, s0, s5
-; GFX1030W32-NEXT: s_mul_hi_u32 s5, s1, s5
-; GFX1030W32-NEXT: s_mul_hi_u32 s14, s1, s7
-; GFX1030W32-NEXT: s_mul_i32 s1, s1, s7
-; GFX1030W32-NEXT: s_mul_hi_u32 s6, s0, s7
-; GFX1030W32-NEXT: s_add_u32 s1, s5, s1
-; GFX1030W32-NEXT: s_addc_u32 s5, 0, s14
-; GFX1030W32-NEXT: s_add_u32 s1, s1, s13
-; GFX1030W32-NEXT: s_mul_i32 s7, s0, s7
-; GFX1030W32-NEXT: s_addc_u32 s1, s5, s12
-; GFX1030W32-NEXT: s_addc_u32 s5, s6, 0
-; GFX1030W32-NEXT: s_add_u32 s1, s1, s7
-; GFX1030W32-NEXT: s_addc_u32 s5, 0, s5
+; GFX1030W32-NEXT: s_add_i32 s6, s7, s6
+; GFX1030W32-NEXT: s_mul_i32 s4, s4, s1
+; GFX1030W32-NEXT: s_add_i32 s6, s6, s5
+; GFX1030W32-NEXT: s_mul_hi_u32 s7, s0, s4
+; GFX1030W32-NEXT: s_mul_i32 s12, s0, s4
+; GFX1030W32-NEXT: s_mul_hi_u32 s4, s1, s4
+; GFX1030W32-NEXT: s_mul_hi_u32 s13, s1, s6
+; GFX1030W32-NEXT: s_mul_i32 s1, s1, s6
+; GFX1030W32-NEXT: s_mul_hi_u32 s5, s0, s6
+; GFX1030W32-NEXT: s_add_u32 s1, s4, s1
+; GFX1030W32-NEXT: s_addc_u32 s4, 0, s13
+; GFX1030W32-NEXT: s_add_u32 s1, s1, s12
+; GFX1030W32-NEXT: s_mul_i32 s6, s0, s6
+; GFX1030W32-NEXT: s_addc_u32 s1, s4, s7
+; GFX1030W32-NEXT: s_addc_u32 s4, s5, 0
+; GFX1030W32-NEXT: s_add_u32 s1, s1, s6
+; GFX1030W32-NEXT: s_addc_u32 s4, 0, s4
; GFX1030W32-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX1030W32-NEXT: s_cmp_lg_u32 s1, 0
-; GFX1030W32-NEXT: s_addc_u32 s0, s0, s5
+; GFX1030W32-NEXT: s_addc_u32 s0, s0, s4
; GFX1030W32-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1030W32-NEXT: s_mul_i32 s6, s10, s0
-; GFX1030W32-NEXT: s_mul_hi_u32 s5, s10, s0
-; GFX1030W32-NEXT: s_mul_hi_u32 s7, s11, s0
+; GFX1030W32-NEXT: s_mul_i32 s5, s10, s0
+; GFX1030W32-NEXT: s_mul_hi_u32 s4, s10, s0
+; GFX1030W32-NEXT: s_mul_hi_u32 s6, s11, s0
; GFX1030W32-NEXT: s_mul_i32 s0, s11, s0
-; GFX1030W32-NEXT: s_mul_hi_u32 s12, s10, s1
-; GFX1030W32-NEXT: s_mul_hi_u32 s13, s11, s1
+; GFX1030W32-NEXT: s_mul_hi_u32 s7, s10, s1
+; GFX1030W32-NEXT: s_mul_hi_u32 s12, s11, s1
; GFX1030W32-NEXT: s_mul_i32 s1, s11, s1
-; GFX1030W32-NEXT: s_add_u32 s6, s12, s6
-; GFX1030W32-NEXT: s_addc_u32 s5, 0, s5
-; GFX1030W32-NEXT: s_add_u32 s1, s6, s1
-; GFX1030W32-NEXT: s_addc_u32 s1, s5, s13
-; GFX1030W32-NEXT: s_addc_u32 s5, s7, 0
+; GFX1030W32-NEXT: s_add_u32 s5, s7, s5
+; GFX1030W32-NEXT: s_addc_u32 s4, 0, s4
+; GFX1030W32-NEXT: s_add_u32 s1, s5, s1
+; GFX1030W32-NEXT: s_addc_u32 s1, s4, s12
+; GFX1030W32-NEXT: s_addc_u32 s4, s6, 0
; GFX1030W32-NEXT: s_add_u32 s1, s1, s0
-; GFX1030W32-NEXT: s_addc_u32 s5, 0, s5
+; GFX1030W32-NEXT: s_addc_u32 s4, 0, s4
; GFX1030W32-NEXT: s_mul_hi_u32 s0, s2, s1
-; GFX1030W32-NEXT: s_mul_i32 s7, s2, s5
-; GFX1030W32-NEXT: s_mul_i32 s12, s2, s1
-; GFX1030W32-NEXT: s_add_i32 s0, s0, s7
-; GFX1030W32-NEXT: v_sub_co_u32 v0, s7, s10, s12
-; GFX1030W32-NEXT: s_mul_i32 s6, s3, s1
+; GFX1030W32-NEXT: s_mul_i32 s6, s2, s4
+; GFX1030W32-NEXT: s_mul_i32 s7, s2, s1
; GFX1030W32-NEXT: s_add_i32 s0, s0, s6
-; GFX1030W32-NEXT: v_sub_co_u32 v1, s12, v0, s2
-; GFX1030W32-NEXT: s_sub_i32 s6, s11, s0
+; GFX1030W32-NEXT: v_sub_co_u32 v0, s6, s10, s7
+; GFX1030W32-NEXT: s_mul_i32 s5, s3, s1
+; GFX1030W32-NEXT: s_add_i32 s0, s0, s5
+; GFX1030W32-NEXT: v_sub_co_u32 v1, s7, v0, s2
+; GFX1030W32-NEXT: s_sub_i32 s5, s11, s0
+; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
+; GFX1030W32-NEXT: s_subb_u32 s5, s5, s3
; GFX1030W32-NEXT: s_cmp_lg_u32 s7, 0
-; GFX1030W32-NEXT: s_subb_u32 s6, s6, s3
-; GFX1030W32-NEXT: s_cmp_lg_u32 s12, 0
; GFX1030W32-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v1
-; GFX1030W32-NEXT: s_subb_u32 s6, s6, 0
-; GFX1030W32-NEXT: s_cmp_ge_u32 s6, s3
+; GFX1030W32-NEXT: s_subb_u32 s5, s5, 0
+; GFX1030W32-NEXT: s_cmp_ge_u32 s5, s3
; GFX1030W32-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX1030W32-NEXT: s_cselect_b32 s12, -1, 0
-; GFX1030W32-NEXT: s_cmp_eq_u32 s6, s3
+; GFX1030W32-NEXT: s_cselect_b32 s7, -1, 0
+; GFX1030W32-NEXT: s_cmp_eq_u32 s5, s3
; GFX1030W32-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GFX1030W32-NEXT: s_add_u32 s6, s1, 1
-; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1030W32-NEXT: s_addc_u32 s12, s5, 0
-; GFX1030W32-NEXT: s_add_u32 s13, s1, 2
-; GFX1030W32-NEXT: s_addc_u32 s14, s5, 0
-; GFX1030W32-NEXT: s_cmp_lg_u32 s7, 0
+; GFX1030W32-NEXT: s_add_u32 s5, s1, 1
+; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc_lo
+; GFX1030W32-NEXT: s_addc_u32 s7, s4, 0
+; GFX1030W32-NEXT: s_add_u32 s12, s1, 2
+; GFX1030W32-NEXT: s_addc_u32 s13, s4, 0
+; GFX1030W32-NEXT: s_cmp_lg_u32 s6, 0
; GFX1030W32-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v0
; GFX1030W32-NEXT: s_subb_u32 s0, s11, s0
-; GFX1030W32-NEXT: v_mov_b32_e32 v2, s13
+; GFX1030W32-NEXT: v_mov_b32_e32 v2, s12
; GFX1030W32-NEXT: s_cmp_ge_u32 s0, s3
; GFX1030W32-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX1030W32-NEXT: s_cselect_b32 s7, -1, 0
+; GFX1030W32-NEXT: s_cselect_b32 s6, -1, 0
; GFX1030W32-NEXT: s_cmp_eq_u32 s0, s3
; GFX1030W32-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX1030W32-NEXT: s_cselect_b32 s0, -1, 0
-; GFX1030W32-NEXT: v_mov_b32_e32 v1, s14
-; GFX1030W32-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX1030W32-NEXT: v_cndmask_b32_e32 v2, s6, v2, vcc_lo
-; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
+; GFX1030W32-NEXT: v_mov_b32_e32 v1, s13
+; GFX1030W32-NEXT: v_cndmask_b32_e64 v0, s6, v0, s0
+; GFX1030W32-NEXT: v_cndmask_b32_e32 v2, s5, v2, vcc_lo
+; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc_lo
; GFX1030W32-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc_lo
+; GFX1030W32-NEXT: v_cndmask_b32_e32 v1, s4, v1, vcc_lo
; GFX1030W32-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX1030W32-NEXT: s_andn2_b32 vcc_lo, exec_lo, s4
-; GFX1030W32-NEXT: s_cbranch_vccnz .LBB16_3
+; GFX1030W32-NEXT: s_cbranch_execnz .LBB16_3
; GFX1030W32-NEXT: .LBB16_2:
; GFX1030W32-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX1030W32-NEXT: s_sub_i32 s1, 0, s2
@@ -2776,7 +2787,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W32-NEXT: s_endpgm
; GFX1030W32-NEXT: .LBB16_4:
; GFX1030W32-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1030W32-NEXT: s_branch .LBB16_2
+; GFX1030W32-NEXT: s_andn2_b32 vcc_lo, exec_lo, s0
+; GFX1030W32-NEXT: s_cbranch_vccz .LBB16_2
+; GFX1030W32-NEXT: s_branch .LBB16_3
;
; GFX1030W64-LABEL: sudiv64:
; GFX1030W64: ; %bb.0:
@@ -2787,6 +2800,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W64-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
; GFX1030W64-NEXT: s_mov_b32 s0, 0
; GFX1030W64-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1030W64-NEXT: s_mov_b64 s[0:1], -1
; GFX1030W64-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1030W64-NEXT: ; %bb.1:
; GFX1030W64-NEXT: v_cvt_f32_u32_e32 v0, s2
@@ -2937,7 +2951,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1030W64-NEXT: s_endpgm
; GFX1030W64-NEXT: .LBB16_4:
; GFX1030W64-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1030W64-NEXT: s_branch .LBB16_2
+; GFX1030W64-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GFX1030W64-NEXT: s_cbranch_vccz .LBB16_2
+; GFX1030W64-NEXT: s_branch .LBB16_3
;
; GFX11-LABEL: sudiv64:
; GFX11: ; %bb.0:
@@ -2945,16 +2961,17 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX11-NEXT: s_load_b128 s[8:11], s[4:5], 0x24
; GFX11-NEXT: s_load_b64 s[2:3], s[4:5], 0x34
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
-; GFX11-NEXT: s_or_b64 s[4:5], s[10:11], s[2:3]
-; GFX11-NEXT: s_mov_b32 s4, 0
+; GFX11-NEXT: s_or_b64 s[0:1], s[10:11], s[2:3]
+; GFX11-NEXT: s_mov_b32 s0, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_lg_u64 s[4:5], 0
+; GFX11-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX11-NEXT: s_mov_b32 s0, -1
; GFX11-NEXT: s_cbranch_scc0 .LBB16_4
; GFX11-NEXT: ; %bb.1:
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX11-NEXT: v_cvt_f32_u32_e32 v1, s3
-; GFX11-NEXT: s_sub_u32 s5, 0, s2
-; GFX11-NEXT: s_subb_u32 s6, 0, s3
+; GFX11-NEXT: s_sub_u32 s4, 0, s2
+; GFX11-NEXT: s_subb_u32 s5, 0, s3
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-NEXT: v_fmamk_f32 v0, v1, 0x4f800000, v0
; GFX11-NEXT: v_rcp_f32_e32 v0, v0
@@ -2970,115 +2987,114 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_2)
; GFX11-NEXT: v_readfirstlane_b32 s0, v1
; GFX11-NEXT: v_readfirstlane_b32 s1, v0
-; GFX11-NEXT: s_mul_i32 s7, s5, s0
-; GFX11-NEXT: s_mul_hi_u32 s13, s5, s1
-; GFX11-NEXT: s_mul_i32 s12, s6, s1
-; GFX11-NEXT: s_add_i32 s7, s13, s7
-; GFX11-NEXT: s_mul_i32 s14, s5, s1
-; GFX11-NEXT: s_add_i32 s7, s7, s12
-; GFX11-NEXT: s_mul_hi_u32 s13, s1, s14
-; GFX11-NEXT: s_mul_hi_u32 s15, s0, s14
-; GFX11-NEXT: s_mul_i32 s12, s0, s14
-; GFX11-NEXT: s_mul_hi_u32 s14, s1, s7
-; GFX11-NEXT: s_mul_i32 s1, s1, s7
-; GFX11-NEXT: s_mul_hi_u32 s16, s0, s7
-; GFX11-NEXT: s_add_u32 s1, s13, s1
-; GFX11-NEXT: s_addc_u32 s13, 0, s14
-; GFX11-NEXT: s_add_u32 s1, s1, s12
-; GFX11-NEXT: s_mul_i32 s7, s0, s7
-; GFX11-NEXT: s_addc_u32 s1, s13, s15
-; GFX11-NEXT: s_addc_u32 s12, s16, 0
+; GFX11-NEXT: s_mul_i32 s6, s4, s0
+; GFX11-NEXT: s_mul_hi_u32 s12, s4, s1
+; GFX11-NEXT: s_mul_i32 s7, s5, s1
+; GFX11-NEXT: s_add_i32 s6, s12, s6
+; GFX11-NEXT: s_mul_i32 s13, s4, s1
+; GFX11-NEXT: s_add_i32 s6, s6, s7
+; GFX11-NEXT: s_mul_hi_u32 s12, s1, s13
+; GFX11-NEXT: s_mul_hi_u32 s14, s0, s13
+; GFX11-NEXT: s_mul_i32 s7, s0, s13
+; GFX11-NEXT: s_mul_hi_u32 s13, s1, s6
+; GFX11-NEXT: s_mul_i32 s1, s1, s6
+; GFX11-NEXT: s_mul_hi_u32 s15, s0, s6
+; GFX11-NEXT: s_add_u32 s1, s12, s1
+; GFX11-NEXT: s_addc_u32 s12, 0, s13
; GFX11-NEXT: s_add_u32 s1, s1, s7
-; GFX11-NEXT: s_addc_u32 s7, 0, s12
+; GFX11-NEXT: s_mul_i32 s6, s0, s6
+; GFX11-NEXT: s_addc_u32 s1, s12, s14
+; GFX11-NEXT: s_addc_u32 s7, s15, 0
+; GFX11-NEXT: s_add_u32 s1, s1, s6
+; GFX11-NEXT: s_addc_u32 s6, 0, s7
; GFX11-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_addc_u32 s0, s0, s7
+; GFX11-NEXT: s_addc_u32 s0, s0, s6
; GFX11-NEXT: v_readfirstlane_b32 s1, v0
-; GFX11-NEXT: s_mul_i32 s7, s5, s0
-; GFX11-NEXT: s_mul_hi_u32 s12, s5, s1
-; GFX11-NEXT: s_mul_i32 s6, s6, s1
-; GFX11-NEXT: s_add_i32 s7, s12, s7
+; GFX11-NEXT: s_mul_i32 s6, s4, s0
+; GFX11-NEXT: s_mul_hi_u32 s7, s4, s1
; GFX11-NEXT: s_mul_i32 s5, s5, s1
-; GFX11-NEXT: s_add_i32 s7, s7, s6
-; GFX11-NEXT: s_mul_hi_u32 s12, s0, s5
-; GFX11-NEXT: s_mul_i32 s13, s0, s5
-; GFX11-NEXT: s_mul_hi_u32 s5, s1, s5
-; GFX11-NEXT: s_mul_hi_u32 s14, s1, s7
-; GFX11-NEXT: s_mul_i32 s1, s1, s7
-; GFX11-NEXT: s_mul_hi_u32 s6, s0, s7
-; GFX11-NEXT: s_add_u32 s1, s5, s1
-; GFX11-NEXT: s_addc_u32 s5, 0, s14
-; GFX11-NEXT: s_add_u32 s1, s1, s13
-; GFX11-NEXT: s_mul_i32 s7, s0, s7
-; GFX11-NEXT: s_addc_u32 s1, s5, s12
-; GFX11-NEXT: s_addc_u32 s5, s6, 0
-; GFX11-NEXT: s_add_u32 s1, s1, s7
-; GFX11-NEXT: s_addc_u32 s5, 0, s5
+; GFX11-NEXT: s_add_i32 s6, s7, s6
+; GFX11-NEXT: s_mul_i32 s4, s4, s1
+; GFX11-NEXT: s_add_i32 s6, s6, s5
+; GFX11-NEXT: s_mul_hi_u32 s7, s0, s4
+; GFX11-NEXT: s_mul_i32 s12, s0, s4
+; GFX11-NEXT: s_mul_hi_u32 s4, s1, s4
+; GFX11-NEXT: s_mul_hi_u32 s13, s1, s6
+; GFX11-NEXT: s_mul_i32 s1, s1, s6
+; GFX11-NEXT: s_mul_hi_u32 s5, s0, s6
+; GFX11-NEXT: s_add_u32 s1, s4, s1
+; GFX11-NEXT: s_addc_u32 s4, 0, s13
+; GFX11-NEXT: s_add_u32 s1, s1, s12
+; GFX11-NEXT: s_mul_i32 s6, s0, s6
+; GFX11-NEXT: s_addc_u32 s1, s4, s7
+; GFX11-NEXT: s_addc_u32 s4, s5, 0
+; GFX11-NEXT: s_add_u32 s1, s1, s6
+; GFX11-NEXT: s_addc_u32 s4, 0, s4
; GFX11-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX11-NEXT: s_cmp_lg_u32 s1, 0
-; GFX11-NEXT: s_addc_u32 s0, s0, s5
+; GFX11-NEXT: s_addc_u32 s0, s0, s4
; GFX11-NEXT: v_readfirstlane_b32 s1, v0
-; GFX11-NEXT: s_mul_i32 s6, s10, s0
-; GFX11-NEXT: s_mul_hi_u32 s5, s10, s0
-; GFX11-NEXT: s_mul_hi_u32 s7, s11, s0
+; GFX11-NEXT: s_mul_i32 s5, s10, s0
+; GFX11-NEXT: s_mul_hi_u32 s4, s10, s0
+; GFX11-NEXT: s_mul_hi_u32 s6, s11, s0
; GFX11-NEXT: s_mul_i32 s0, s11, s0
-; GFX11-NEXT: s_mul_hi_u32 s12, s10, s1
-; GFX11-NEXT: s_mul_hi_u32 s13, s11, s1
+; GFX11-NEXT: s_mul_hi_u32 s7, s10, s1
+; GFX11-NEXT: s_mul_hi_u32 s12, s11, s1
; GFX11-NEXT: s_mul_i32 s1, s11, s1
-; GFX11-NEXT: s_add_u32 s6, s12, s6
-; GFX11-NEXT: s_addc_u32 s5, 0, s5
-; GFX11-NEXT: s_add_u32 s1, s6, s1
-; GFX11-NEXT: s_addc_u32 s1, s5, s13
-; GFX11-NEXT: s_addc_u32 s5, s7, 0
+; GFX11-NEXT: s_add_u32 s5, s7, s5
+; GFX11-NEXT: s_addc_u32 s4, 0, s4
+; GFX11-NEXT: s_add_u32 s1, s5, s1
+; GFX11-NEXT: s_addc_u32 s1, s4, s12
+; GFX11-NEXT: s_addc_u32 s4, s6, 0
; GFX11-NEXT: s_add_u32 s1, s1, s0
-; GFX11-NEXT: s_addc_u32 s5, 0, s5
+; GFX11-NEXT: s_addc_u32 s4, 0, s4
; GFX11-NEXT: s_mul_hi_u32 s0, s2, s1
-; GFX11-NEXT: s_mul_i32 s7, s2, s5
-; GFX11-NEXT: s_mul_i32 s12, s2, s1
-; GFX11-NEXT: s_add_i32 s0, s0, s7
-; GFX11-NEXT: v_sub_co_u32 v0, s7, s10, s12
-; GFX11-NEXT: s_mul_i32 s6, s3, s1
-; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_mul_i32 s6, s2, s4
+; GFX11-NEXT: s_mul_i32 s7, s2, s1
; GFX11-NEXT: s_add_i32 s0, s0, s6
-; GFX11-NEXT: v_sub_co_u32 v1, s12, v0, s2
-; GFX11-NEXT: s_sub_i32 s6, s11, s0
+; GFX11-NEXT: v_sub_co_u32 v0, s6, s10, s7
+; GFX11-NEXT: s_mul_i32 s5, s3, s1
+; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
+; GFX11-NEXT: s_add_i32 s0, s0, s5
+; GFX11-NEXT: v_sub_co_u32 v1, s7, v0, s2
+; GFX11-NEXT: s_sub_i32 s5, s11, s0
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
+; GFX11-NEXT: s_subb_u32 s5, s5, s3
; GFX11-NEXT: s_cmp_lg_u32 s7, 0
-; GFX11-NEXT: s_subb_u32 s6, s6, s3
-; GFX11-NEXT: s_cmp_lg_u32 s12, 0
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v1
-; GFX11-NEXT: s_subb_u32 s6, s6, 0
+; GFX11-NEXT: s_subb_u32 s5, s5, 0
; GFX11-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
-; GFX11-NEXT: s_cmp_ge_u32 s6, s3
+; GFX11-NEXT: s_cmp_ge_u32 s5, s3
; GFX11-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX11-NEXT: s_cselect_b32 s12, -1, 0
-; GFX11-NEXT: s_cmp_eq_u32 s6, s3
+; GFX11-NEXT: s_cselect_b32 s7, -1, 0
+; GFX11-NEXT: s_cmp_eq_u32 s5, s3
; GFX11-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GFX11-NEXT: s_add_u32 s6, s1, 1
-; GFX11-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX11-NEXT: s_addc_u32 s12, s5, 0
-; GFX11-NEXT: s_add_u32 s13, s1, 2
-; GFX11-NEXT: s_addc_u32 s14, s5, 0
-; GFX11-NEXT: s_cmp_lg_u32 s7, 0
+; GFX11-NEXT: s_add_u32 s5, s1, 1
+; GFX11-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc_lo
+; GFX11-NEXT: s_addc_u32 s7, s4, 0
+; GFX11-NEXT: s_add_u32 s12, s1, 2
+; GFX11-NEXT: s_addc_u32 s13, s4, 0
+; GFX11-NEXT: s_cmp_lg_u32 s6, 0
; GFX11-NEXT: v_cmp_le_u32_e32 vcc_lo, s2, v0
; GFX11-NEXT: s_subb_u32 s0, s11, s0
-; GFX11-NEXT: v_mov_b32_e32 v2, s13
+; GFX11-NEXT: v_mov_b32_e32 v2, s12
; GFX11-NEXT: s_cmp_ge_u32 s0, s3
; GFX11-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
-; GFX11-NEXT: s_cselect_b32 s7, -1, 0
+; GFX11-NEXT: s_cselect_b32 s6, -1, 0
; GFX11-NEXT: s_cmp_eq_u32 s0, s3
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX11-NEXT: s_cselect_b32 s0, -1, 0
-; GFX11-NEXT: v_mov_b32_e32 v1, s14
-; GFX11-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX11-NEXT: v_cndmask_b32_e32 v2, s6, v2, vcc_lo
+; GFX11-NEXT: v_mov_b32_e32 v1, s13
+; GFX11-NEXT: v_cndmask_b32_e64 v0, s6, v0, s0
+; GFX11-NEXT: v_cndmask_b32_e32 v2, s5, v2, vcc_lo
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_3) | instskip(NEXT) | instid1(VALU_DEP_3)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v1, s7, v1, vcc_lo
; GFX11-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
; GFX11-NEXT: s_delay_alu instid0(VALU_DEP_2) | instskip(NEXT) | instid1(VALU_DEP_4)
-; GFX11-NEXT: v_cndmask_b32_e32 v1, s5, v1, vcc_lo
+; GFX11-NEXT: v_cndmask_b32_e32 v1, s4, v1, vcc_lo
; GFX11-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
-; GFX11-NEXT: s_cbranch_vccnz .LBB16_3
+; GFX11-NEXT: s_cbranch_execnz .LBB16_3
; GFX11-NEXT: .LBB16_2:
; GFX11-NEXT: v_cvt_f32_u32_e32 v0, s2
; GFX11-NEXT: s_sub_i32 s1, 0, s2
@@ -3114,7 +3130,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX11-NEXT: s_endpgm
; GFX11-NEXT: .LBB16_4:
; GFX11-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX11-NEXT: s_branch .LBB16_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-NEXT: s_cbranch_vccz .LBB16_2
+; GFX11-NEXT: s_branch .LBB16_3
;
; GFX1250-LABEL: sudiv64:
; GFX1250: ; %bb.0:
@@ -3126,6 +3144,7 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_delay_alu instid0(SALU_CYCLE_1) | instskip(NEXT) | instid1(SALU_CYCLE_1)
; GFX1250-NEXT: s_and_b64 s[0:1], s[0:1], lit64(0xffffffff00000000)
; GFX1250-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1250-NEXT: s_mov_b32 s1, -1
; GFX1250-NEXT: s_cbranch_scc0 .LBB16_4
; GFX1250-NEXT: ; %bb.1:
; GFX1250-NEXT: s_cvt_f32_u32 s0, s2
@@ -3272,7 +3291,9 @@ define amdgpu_kernel void @sudiv64(ptr addrspace(1) %out, i64 %x, i64 %y) {
; GFX1250-NEXT: s_endpgm
; GFX1250-NEXT: .LBB16_4:
; GFX1250-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1250-NEXT: s_branch .LBB16_2
+; GFX1250-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s1
+; GFX1250-NEXT: s_cbranch_vccz .LBB16_2
+; GFX1250-NEXT: s_branch .LBB16_3
%result = udiv i64 %x, %y
store i64 %result, ptr addrspace(1) %out
ret void
diff --git a/llvm/test/CodeGen/AMDGPU/ctpop16.ll b/llvm/test/CodeGen/AMDGPU/ctpop16.ll
index cefcbdd..20df66d 100644
--- a/llvm/test/CodeGen/AMDGPU/ctpop16.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctpop16.ll
@@ -1492,6 +1492,7 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_lshr_b32 s4, s6, 16
; SI-NEXT: s_cmp_lg_u32 s4, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB14_4
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_mov_b32 s11, 0xf000
@@ -1513,7 +1514,9 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB14_4:
; SI-NEXT: ; implicit-def: $vgpr0
-; SI-NEXT: s_branch .LBB14_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB14_2
+; SI-NEXT: s_branch .LBB14_3
;
; VI-LABEL: ctpop_i16_in_br:
; VI: ; %bb.0: ; %entry
@@ -1522,6 +1525,7 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_lshr_b32 s4, s6, 16
; VI-NEXT: s_cmp_lg_u32 s4, 0
+; VI-NEXT: s_mov_b64 s[4:5], -1
; VI-NEXT: s_cbranch_scc0 .LBB14_4
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_mov_b32 s11, 0xf000
@@ -1543,7 +1547,9 @@ define amdgpu_kernel void @ctpop_i16_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_endpgm
; VI-NEXT: .LBB14_4:
; VI-NEXT: ; implicit-def: $vgpr0
-; VI-NEXT: s_branch .LBB14_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; VI-NEXT: s_cbranch_vccz .LBB14_2
+; VI-NEXT: s_branch .LBB14_3
;
; EG-LABEL: ctpop_i16_in_br:
; EG: ; %bb.0: ; %entry
diff --git a/llvm/test/CodeGen/AMDGPU/ctpop64.ll b/llvm/test/CodeGen/AMDGPU/ctpop64.ll
index 37f5889..861777c 100644
--- a/llvm/test/CodeGen/AMDGPU/ctpop64.ll
+++ b/llvm/test/CodeGen/AMDGPU/ctpop64.ll
@@ -339,11 +339,12 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0xd
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s8, 0
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_4
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x2
-; SI-NEXT: s_mov_b64 s[2:3], 0
-; SI-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; SI-NEXT: s_mov_b64 s[8:9], 0
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 vcc, vcc
; SI-NEXT: s_cbranch_vccnz .LBB7_3
@@ -359,7 +360,9 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB7_4:
; SI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; SI-NEXT: s_branch .LBB7_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB7_2
+; SI-NEXT: s_branch .LBB7_3
;
; VI-LABEL: ctpop_i64_in_br:
; VI: ; %bb.0: ; %entry
@@ -368,6 +371,7 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_load_dwordx2 s[6:7], s[4:5], 0x34
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s8, 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_cbranch_scc0 .LBB7_4
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_load_dwordx2 s[4:5], s[2:3], 0x8
@@ -386,7 +390,9 @@ define amdgpu_kernel void @ctpop_i64_in_br(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_endpgm
; VI-NEXT: .LBB7_4:
; VI-NEXT: ; implicit-def: $sgpr4_sgpr5
-; VI-NEXT: s_branch .LBB7_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB7_2
+; VI-NEXT: s_branch .LBB7_3
entry:
%tmp0 = icmp eq i32 %cond, 0
br i1 %tmp0, label %if, label %else
diff --git a/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll b/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
index 8c3d20f..09a6bc7 100644
--- a/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
+++ b/llvm/test/CodeGen/AMDGPU/divergent-branch-uniform-condition.ll
@@ -131,15 +131,9 @@ define amdgpu_ps void @i1_copy_assert(i1 %v4) {
; ISA-NEXT: s_mov_b64 s[0:1], 0
; ISA-NEXT: ; implicit-def: $sgpr4_sgpr5
; ISA-NEXT: ; implicit-def: $sgpr2_sgpr3
-; ISA-NEXT: s_branch .LBB1_3
-; ISA-NEXT: .LBB1_1: ; %endif1
-; ISA-NEXT: ; in Loop: Header=BB1_3 Depth=1
-; ISA-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
-; ISA-NEXT: s_and_b64 s[8:9], vcc, exec
-; ISA-NEXT: s_mov_b64 s[6:7], 0
-; ISA-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
-; ISA-NEXT: .LBB1_2: ; %Flow
-; ISA-NEXT: ; in Loop: Header=BB1_3 Depth=1
+; ISA-NEXT: s_branch .LBB1_2
+; ISA-NEXT: .LBB1_1: ; %Flow
+; ISA-NEXT: ; in Loop: Header=BB1_2 Depth=1
; ISA-NEXT: s_and_b64 s[8:9], exec, s[4:5]
; ISA-NEXT: s_or_b64 s[0:1], s[8:9], s[0:1]
; ISA-NEXT: s_andn2_b64 s[2:3], s[2:3], exec
@@ -147,16 +141,21 @@ define amdgpu_ps void @i1_copy_assert(i1 %v4) {
; ISA-NEXT: s_mov_b32 s8, 1
; ISA-NEXT: s_or_b64 s[2:3], s[2:3], s[6:7]
; ISA-NEXT: s_andn2_b64 exec, exec, s[0:1]
-; ISA-NEXT: s_cbranch_execz .LBB1_5
-; ISA-NEXT: .LBB1_3: ; %loop
+; ISA-NEXT: s_cbranch_execz .LBB1_4
+; ISA-NEXT: .LBB1_2: ; %loop
; ISA-NEXT: ; =>This Inner Loop Header: Depth=1
; ISA-NEXT: s_or_b64 s[4:5], s[4:5], exec
; ISA-NEXT: s_cmp_lg_u32 s8, 0
-; ISA-NEXT: s_cbranch_scc1 .LBB1_1
-; ISA-NEXT: ; %bb.4: ; in Loop: Header=BB1_3 Depth=1
; ISA-NEXT: s_mov_b64 s[6:7], -1
-; ISA-NEXT: s_branch .LBB1_2
-; ISA-NEXT: .LBB1_5: ; %Flow2
+; ISA-NEXT: s_cbranch_scc0 .LBB1_1
+; ISA-NEXT: ; %bb.3: ; %endif1
+; ISA-NEXT: ; in Loop: Header=BB1_2 Depth=1
+; ISA-NEXT: s_andn2_b64 s[4:5], s[4:5], exec
+; ISA-NEXT: s_and_b64 s[8:9], vcc, exec
+; ISA-NEXT: s_mov_b64 s[6:7], 0
+; ISA-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; ISA-NEXT: s_branch .LBB1_1
+; ISA-NEXT: .LBB1_4: ; %Flow2
; ISA-NEXT: s_or_b64 exec, exec, s[0:1]
; ISA-NEXT: v_mov_b32_e32 v0, 0
; ISA-NEXT: v_cndmask_b32_e64 v1, 0, 1.0, s[2:3]
diff --git a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
index b0439b1..09730e8 100644
--- a/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
+++ b/llvm/test/CodeGen/AMDGPU/dynamic_stackalloc.ll
@@ -839,33 +839,37 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX9-SDAG-NEXT: s_load_dwordx2 s[4:5], s[8:9], 0x0
; GFX9-SDAG-NEXT: s_add_u32 s0, s0, s17
; GFX9-SDAG-NEXT: s_addc_u32 s1, s1, 0
-; GFX9-SDAG-NEXT: s_mov_b32 s33, 0
-; GFX9-SDAG-NEXT: s_movk_i32 s32, 0x1000
+; GFX9-SDAG-NEXT: s_mov_b32 s8, 0
+; GFX9-SDAG-NEXT: s_mov_b64 s[6:7], -1
; GFX9-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-SDAG-NEXT: s_cmp_lg_u32 s4, 0
-; GFX9-SDAG-NEXT: s_mov_b32 s4, 0
-; GFX9-SDAG-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX9-SDAG-NEXT: s_mov_b32 s33, 0
+; GFX9-SDAG-NEXT: s_movk_i32 s32, 0x1000
+; GFX9-SDAG-NEXT: s_cbranch_scc0 .LBB7_4
; GFX9-SDAG-NEXT: ; %bb.1: ; %bb.1
; GFX9-SDAG-NEXT: v_lshl_add_u32 v0, v0, 2, 15
; GFX9-SDAG-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
; GFX9-SDAG-NEXT: s_mov_b64 s[6:7], exec
; GFX9-SDAG-NEXT: .LBB7_2: ; =>This Inner Loop Header: Depth=1
-; GFX9-SDAG-NEXT: s_ff1_i32_b64 s8, s[6:7]
-; GFX9-SDAG-NEXT: v_readlane_b32 s9, v0, s8
-; GFX9-SDAG-NEXT: s_bitset0_b64 s[6:7], s8
-; GFX9-SDAG-NEXT: s_max_u32 s4, s4, s9
+; GFX9-SDAG-NEXT: s_ff1_i32_b64 s4, s[6:7]
+; GFX9-SDAG-NEXT: v_readlane_b32 s9, v0, s4
+; GFX9-SDAG-NEXT: s_bitset0_b64 s[6:7], s4
+; GFX9-SDAG-NEXT: s_max_u32 s8, s8, s9
; GFX9-SDAG-NEXT: s_cmp_lg_u64 s[6:7], 0
; GFX9-SDAG-NEXT: s_cbranch_scc1 .LBB7_2
; GFX9-SDAG-NEXT: ; %bb.3:
-; GFX9-SDAG-NEXT: s_mov_b32 s6, s32
-; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s6
-; GFX9-SDAG-NEXT: v_lshl_add_u32 v0, s4, 6, v0
+; GFX9-SDAG-NEXT: s_mov_b32 s4, s32
+; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, s4
+; GFX9-SDAG-NEXT: v_lshl_add_u32 v0, s8, 6, v0
; GFX9-SDAG-NEXT: v_readfirstlane_b32 s32, v0
; GFX9-SDAG-NEXT: v_mov_b32_e32 v0, 1
-; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], s6
+; GFX9-SDAG-NEXT: buffer_store_dword v0, off, s[0:3], s4
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: s_cbranch_execnz .LBB7_5
-; GFX9-SDAG-NEXT: .LBB7_4: ; %bb.0
+; GFX9-SDAG-NEXT: s_mov_b64 s[6:7], 0
+; GFX9-SDAG-NEXT: .LBB7_4: ; %Flow
+; GFX9-SDAG-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GFX9-SDAG-NEXT: s_cbranch_vccnz .LBB7_6
+; GFX9-SDAG-NEXT: ; %bb.5: ; %bb.0
; GFX9-SDAG-NEXT: s_lshl_b32 s5, s5, 2
; GFX9-SDAG-NEXT: s_add_i32 s4, s32, 0xfff
; GFX9-SDAG-NEXT: s_add_i32 s5, s5, 15
@@ -877,10 +881,8 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX9-SDAG-NEXT: s_add_i32 s32, s4, s5
; GFX9-SDAG-NEXT: buffer_store_dword v0, v1, s[0:3], 0 offen
; GFX9-SDAG-NEXT: s_waitcnt vmcnt(0)
-; GFX9-SDAG-NEXT: .LBB7_5: ; %bb.2
+; GFX9-SDAG-NEXT: .LBB7_6: ; %bb.2
; GFX9-SDAG-NEXT: s_endpgm
-; GFX9-SDAG-NEXT: .LBB7_6:
-; GFX9-SDAG-NEXT: s_branch .LBB7_4
;
; GFX9-GISEL-LABEL: test_dynamic_stackalloc_kernel_control_flow:
; GFX9-GISEL: ; %bb.0: ; %entry
@@ -935,35 +937,39 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX11-SDAG-LABEL: test_dynamic_stackalloc_kernel_control_flow:
; GFX11-SDAG: ; %bb.0: ; %entry
; GFX11-SDAG-NEXT: s_load_b64 s[0:1], s[4:5], 0x0
+; GFX11-SDAG-NEXT: s_mov_b32 s2, 0
; GFX11-SDAG-NEXT: s_mov_b32 s33, 0
; GFX11-SDAG-NEXT: s_mov_b32 s32, 64
; GFX11-SDAG-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-SDAG-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
-; GFX11-SDAG-NEXT: s_cbranch_scc0 .LBB7_6
+; GFX11-SDAG-NEXT: s_mov_b32 s0, -1
+; GFX11-SDAG-NEXT: s_cbranch_scc0 .LBB7_4
; GFX11-SDAG-NEXT: ; %bb.1: ; %bb.1
; GFX11-SDAG-NEXT: v_and_b32_e32 v0, 0x3ff, v0
-; GFX11-SDAG-NEXT: s_mov_b32 s2, exec_lo
+; GFX11-SDAG-NEXT: s_mov_b32 s0, exec_lo
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instskip(NEXT) | instid1(VALU_DEP_1)
; GFX11-SDAG-NEXT: v_lshl_add_u32 v0, v0, 2, 15
; GFX11-SDAG-NEXT: v_and_b32_e32 v0, 0x1ff0, v0
; GFX11-SDAG-NEXT: .LBB7_2: ; =>This Inner Loop Header: Depth=1
-; GFX11-SDAG-NEXT: s_ctz_i32_b32 s3, s2
+; GFX11-SDAG-NEXT: s_ctz_i32_b32 s3, s0
; GFX11-SDAG-NEXT: s_delay_alu instid0(VALU_DEP_1) | instid1(SALU_CYCLE_1)
; GFX11-SDAG-NEXT: v_readlane_b32 s4, v0, s3
-; GFX11-SDAG-NEXT: s_bitset0_b32 s2, s3
-; GFX11-SDAG-NEXT: s_max_u32 s0, s0, s4
-; GFX11-SDAG-NEXT: s_cmp_lg_u32 s2, 0
+; GFX11-SDAG-NEXT: s_bitset0_b32 s0, s3
+; GFX11-SDAG-NEXT: s_max_u32 s2, s2, s4
+; GFX11-SDAG-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-SDAG-NEXT: s_cbranch_scc1 .LBB7_2
; GFX11-SDAG-NEXT: ; %bb.3:
-; GFX11-SDAG-NEXT: s_mov_b32 s2, s32
+; GFX11-SDAG-NEXT: s_mov_b32 s3, s32
; GFX11-SDAG-NEXT: v_mov_b32_e32 v1, 1
-; GFX11-SDAG-NEXT: v_lshl_add_u32 v0, s0, 5, s2
-; GFX11-SDAG-NEXT: scratch_store_b32 off, v1, s2 dlc
+; GFX11-SDAG-NEXT: v_lshl_add_u32 v0, s2, 5, s3
+; GFX11-SDAG-NEXT: s_mov_b32 s0, 0
+; GFX11-SDAG-NEXT: scratch_store_b32 off, v1, s3 dlc
; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-SDAG-NEXT: v_readfirstlane_b32 s32, v0
-; GFX11-SDAG-NEXT: s_cbranch_execnz .LBB7_5
-; GFX11-SDAG-NEXT: .LBB7_4: ; %bb.0
+; GFX11-SDAG-NEXT: .LBB7_4: ; %Flow
+; GFX11-SDAG-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-SDAG-NEXT: s_cbranch_vccnz .LBB7_6
+; GFX11-SDAG-NEXT: ; %bb.5: ; %bb.0
; GFX11-SDAG-NEXT: s_lshl_b32 s0, s1, 2
; GFX11-SDAG-NEXT: v_mov_b32_e32 v0, 2
; GFX11-SDAG-NEXT: s_add_i32 s0, s0, 15
@@ -974,10 +980,8 @@ define amdgpu_kernel void @test_dynamic_stackalloc_kernel_control_flow(i32 %n, i
; GFX11-SDAG-NEXT: scratch_store_b32 off, v0, s1 dlc
; GFX11-SDAG-NEXT: s_waitcnt_vscnt null, 0x0
; GFX11-SDAG-NEXT: s_add_i32 s32, s1, s0
-; GFX11-SDAG-NEXT: .LBB7_5: ; %bb.2
+; GFX11-SDAG-NEXT: .LBB7_6: ; %bb.2
; GFX11-SDAG-NEXT: s_endpgm
-; GFX11-SDAG-NEXT: .LBB7_6:
-; GFX11-SDAG-NEXT: s_branch .LBB7_4
;
; GFX11-GISEL-LABEL: test_dynamic_stackalloc_kernel_control_flow:
; GFX11-GISEL: ; %bb.0: ; %entry
diff --git a/llvm/test/CodeGen/AMDGPU/exec-mask-opt-cannot-create-empty-or-backward-segment.ll b/llvm/test/CodeGen/AMDGPU/exec-mask-opt-cannot-create-empty-or-backward-segment.ll
index 72913d2..d766183 100644
--- a/llvm/test/CodeGen/AMDGPU/exec-mask-opt-cannot-create-empty-or-backward-segment.ll
+++ b/llvm/test/CodeGen/AMDGPU/exec-mask-opt-cannot-create-empty-or-backward-segment.ll
@@ -34,9 +34,6 @@ define amdgpu_kernel void @cannot_create_empty_or_backwards_segment(i1 %arg, i1
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: s_branch .LBB0_3
; CHECK-NEXT: .LBB0_1: ; in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: s_mov_b64 s[18:19], 0
-; CHECK-NEXT: s_mov_b64 s[20:21], -1
-; CHECK-NEXT: s_mov_b64 s[16:17], -1
; CHECK-NEXT: s_mov_b64 s[22:23], -1
; CHECK-NEXT: .LBB0_2: ; %Flow7
; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
@@ -45,6 +42,9 @@ define amdgpu_kernel void @cannot_create_empty_or_backwards_segment(i1 %arg, i1
; CHECK-NEXT: .LBB0_3: ; %bb7
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: s_and_b64 vcc, exec, s[2:3]
+; CHECK-NEXT: s_mov_b64 s[18:19], 0
+; CHECK-NEXT: s_mov_b64 s[20:21], -1
+; CHECK-NEXT: s_mov_b64 s[16:17], -1
; CHECK-NEXT: s_cbranch_vccnz .LBB0_1
; CHECK-NEXT: ; %bb.4: ; %bb8
; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
@@ -58,9 +58,9 @@ define amdgpu_kernel void @cannot_create_empty_or_backwards_segment(i1 %arg, i1
; CHECK-NEXT: s_cbranch_execz .LBB0_7
; CHECK-NEXT: s_branch .LBB0_8
; CHECK-NEXT: .LBB0_6: ; in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: s_mov_b64 s[16:17], -1
-; CHECK-NEXT: s_mov_b64 s[18:19], 0
; CHECK-NEXT: s_mov_b64 s[22:23], 0
+; CHECK-NEXT: s_andn2_b64 vcc, exec, s[16:17]
+; CHECK-NEXT: s_cbranch_vccnz .LBB0_8
; CHECK-NEXT: .LBB0_7: ; %bb10
; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
; CHECK-NEXT: s_mov_b64 s[18:19], -1
@@ -68,7 +68,6 @@ define amdgpu_kernel void @cannot_create_empty_or_backwards_segment(i1 %arg, i1
; CHECK-NEXT: s_mov_b64 s[22:23], s[14:15]
; CHECK-NEXT: .LBB0_8: ; %Flow9
; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
-; CHECK-NEXT: s_mov_b64 s[20:21], -1
; CHECK-NEXT: s_andn2_b64 vcc, exec, s[22:23]
; CHECK-NEXT: s_mov_b64 s[22:23], -1
; CHECK-NEXT: s_cbranch_vccnz .LBB0_2
diff --git a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
index 555adec..24982a5 100644
--- a/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
+++ b/llvm/test/CodeGen/AMDGPU/extract-subvector-16bit.ll
@@ -8,8 +8,9 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; SI-LABEL: vec_8xi16_extract_4xi16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_cmp_lg_u32 s16, 0
; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_cmp_lg_u32 s16, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB0_4
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s7, 0xf000
@@ -86,13 +87,15 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: s_branch .LBB0_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB0_2
+; SI-NEXT: s_branch .LBB0_3
;
; GFX9-LABEL: vec_8xi16_extract_4xi16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB0_4
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[2:3], off glc
@@ -115,19 +118,20 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB0_4:
; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX9-NEXT: s_branch .LBB0_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB0_2
+; GFX9-NEXT: s_branch .LBB0_3
;
; GFX11-TRUE16-LABEL: vec_8xi16_extract_4xi16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB0_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB0_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB0_3
; GFX11-TRUE16-NEXT: .LBB0_2: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -143,19 +147,20 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB0_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB0_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB0_2
+; GFX11-TRUE16-NEXT: s_branch .LBB0_3
;
; GFX11-FAKE16-LABEL: vec_8xi16_extract_4xi16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB0_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB0_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB0_3
; GFX11-FAKE16-NEXT: .LBB0_2: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -176,7 +181,9 @@ define <4 x i16> @vec_8xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1)
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB0_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB0_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB0_2
+; GFX11-FAKE16-NEXT: s_branch .LBB0_3
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -200,8 +207,9 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; SI-LABEL: vec_8xi16_extract_4xi16_2:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_cmp_lg_u32 s16, 0
; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_cmp_lg_u32 s16, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB1_4
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s7, 0xf000
@@ -280,13 +288,15 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: s_branch .LBB1_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB1_2
+; SI-NEXT: s_branch .LBB1_3
;
; GFX9-LABEL: vec_8xi16_extract_4xi16_2:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB1_4
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[2:3], off glc
@@ -309,19 +319,20 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB1_4:
; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX9-NEXT: s_branch .LBB1_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB1_2
+; GFX9-NEXT: s_branch .LBB1_3
;
; GFX11-TRUE16-LABEL: vec_8xi16_extract_4xi16_2:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB1_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB1_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB1_3
; GFX11-TRUE16-NEXT: .LBB1_2: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -337,19 +348,20 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB1_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB1_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB1_2
+; GFX11-TRUE16-NEXT: s_branch .LBB1_3
;
; GFX11-FAKE16-LABEL: vec_8xi16_extract_4xi16_2:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB1_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB1_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB1_3
; GFX11-FAKE16-NEXT: .LBB1_2: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -370,7 +382,9 @@ define <4 x i16> @vec_8xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB1_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB1_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB1_2
+; GFX11-FAKE16-NEXT: s_branch .LBB1_3
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -394,8 +408,9 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; SI-LABEL: vec_8xf16_extract_4xf16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_cmp_lg_u32 s16, 0
; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_cmp_lg_u32 s16, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB2_4
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s7, 0xf000
@@ -474,13 +489,15 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: s_branch .LBB2_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB2_2
+; SI-NEXT: s_branch .LBB2_3
;
; GFX9-LABEL: vec_8xf16_extract_4xf16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB2_4
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[2:3], off glc
@@ -506,19 +523,20 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB2_4:
; GFX9-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX9-NEXT: s_branch .LBB2_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB2_2
+; GFX9-NEXT: s_branch .LBB2_3
;
; GFX11-TRUE16-LABEL: vec_8xf16_extract_4xf16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB2_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB2_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB2_3
; GFX11-TRUE16-NEXT: .LBB2_2: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -540,19 +558,20 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB2_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-TRUE16-NEXT: s_branch .LBB2_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB2_2
+; GFX11-TRUE16-NEXT: s_branch .LBB2_3
;
; GFX11-FAKE16-LABEL: vec_8xf16_extract_4xf16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB2_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB2_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB2_3
; GFX11-FAKE16-NEXT: .LBB2_2: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -574,7 +593,9 @@ define <4 x half> @vec_8xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB2_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5
-; GFX11-FAKE16-NEXT: s_branch .LBB2_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB2_2
+; GFX11-FAKE16-NEXT: s_branch .LBB2_3
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -599,8 +620,9 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; SI-LABEL: vec_16xi16_extract_4xi16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_cmp_lg_u32 s16, 0
; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_cmp_lg_u32 s16, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB3_4
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s7, 0xf000
@@ -709,13 +731,15 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: s_branch .LBB3_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB3_2
+; SI-NEXT: s_branch .LBB3_3
;
; GFX9-LABEL: vec_16xi16_extract_4xi16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB3_4
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
@@ -744,21 +768,22 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB3_4:
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; GFX9-NEXT: s_branch .LBB3_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB3_2
+; GFX9-NEXT: s_branch .LBB3_3
;
; GFX11-TRUE16-LABEL: vec_16xi16_extract_4xi16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB3_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB3_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB3_3
; GFX11-TRUE16-NEXT: .LBB3_2: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -776,21 +801,22 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB3_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB3_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB3_2
+; GFX11-TRUE16-NEXT: s_branch .LBB3_3
;
; GFX11-FAKE16-LABEL: vec_16xi16_extract_4xi16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB3_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB3_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB3_3
; GFX11-FAKE16-NEXT: .LBB3_2: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -813,7 +839,9 @@ define <4 x i16> @vec_16xi16_extract_4xi16(ptr addrspace(1) %p0, ptr addrspace(1
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB3_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB3_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB3_2
+; GFX11-FAKE16-NEXT: s_branch .LBB3_3
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -838,8 +866,9 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; SI-LABEL: vec_16xi16_extract_4xi16_2:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_cmp_lg_u32 s16, 0
; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_cmp_lg_u32 s16, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB4_4
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s7, 0xf000
@@ -950,13 +979,15 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr2
; SI-NEXT: ; implicit-def: $vgpr5
-; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: s_branch .LBB4_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB4_2
+; SI-NEXT: s_branch .LBB4_3
;
; GFX9-LABEL: vec_16xi16_extract_4xi16_2:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB4_4
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
@@ -985,21 +1016,22 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB4_4:
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; GFX9-NEXT: s_branch .LBB4_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB4_2
+; GFX9-NEXT: s_branch .LBB4_3
;
; GFX11-TRUE16-LABEL: vec_16xi16_extract_4xi16_2:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB4_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB4_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB4_3
; GFX11-TRUE16-NEXT: .LBB4_2: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -1017,21 +1049,22 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB4_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB4_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB4_2
+; GFX11-TRUE16-NEXT: s_branch .LBB4_3
;
; GFX11-FAKE16-LABEL: vec_16xi16_extract_4xi16_2:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB4_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB4_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB4_3
; GFX11-FAKE16-NEXT: .LBB4_2: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -1054,7 +1087,9 @@ define <4 x i16> @vec_16xi16_extract_4xi16_2(ptr addrspace(1) %p0, ptr addrspace
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB4_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB4_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB4_2
+; GFX11-FAKE16-NEXT: s_branch .LBB4_3
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -1079,8 +1114,9 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; SI-LABEL: vec_16xf16_extract_4xf16:
; SI: ; %bb.0:
; SI-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
-; SI-NEXT: s_cmp_lg_u32 s16, 0
; SI-NEXT: s_mov_b32 s6, 0
+; SI-NEXT: s_cmp_lg_u32 s16, 0
+; SI-NEXT: s_mov_b64 s[4:5], -1
; SI-NEXT: s_cbranch_scc0 .LBB5_4
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s7, 0xf000
@@ -1191,13 +1227,15 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; SI-NEXT: ; implicit-def: $vgpr4
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_mov_b64 vcc, 0
-; SI-NEXT: s_branch .LBB5_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-NEXT: s_cbranch_vccz .LBB5_2
+; SI-NEXT: s_branch .LBB5_3
;
; GFX9-LABEL: vec_16xf16_extract_4xf16:
; GFX9: ; %bb.0:
; GFX9-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s16, 0
+; GFX9-NEXT: s_mov_b64 s[4:5], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB5_4
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
@@ -1229,21 +1267,22 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; GFX9-NEXT: s_setpc_b64 s[30:31]
; GFX9-NEXT: .LBB5_4:
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
-; GFX9-NEXT: s_branch .LBB5_2
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX9-NEXT: s_cbranch_vccz .LBB5_2
+; GFX9-NEXT: s_branch .LBB5_3
;
; GFX11-TRUE16-LABEL: vec_16xf16_extract_4xf16:
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB5_4
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB5_3
+; GFX11-TRUE16-NEXT: s_cbranch_execnz .LBB5_3
; GFX11-TRUE16-NEXT: .LBB5_2: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -1267,21 +1306,22 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-TRUE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-TRUE16-NEXT: .LBB5_4:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-TRUE16-NEXT: s_branch .LBB5_2
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB5_2
+; GFX11-TRUE16-NEXT: s_branch .LBB5_3
;
; GFX11-FAKE16-LABEL: vec_16xf16_extract_4xf16:
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: s_cmp_lg_u32 s0, 0
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB5_4
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB5_3
+; GFX11-FAKE16-NEXT: s_cbranch_execnz .LBB5_3
; GFX11-FAKE16-NEXT: .LBB5_2: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -1305,7 +1345,9 @@ define <4 x half> @vec_16xf16_extract_4xf16(ptr addrspace(1) %p0, ptr addrspace(
; GFX11-FAKE16-NEXT: s_setpc_b64 s[30:31]
; GFX11-FAKE16-NEXT: .LBB5_4:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
-; GFX11-FAKE16-NEXT: s_branch .LBB5_2
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB5_2
+; GFX11-FAKE16-NEXT: s_branch .LBB5_3
%cond = icmp eq i32 %cond.arg, 0
br i1 %cond, label %T, label %F
@@ -1417,9 +1459,10 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; SI-NEXT: buffer_load_ubyte v4, off, s[0:3], s32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 1, v4
+; SI-NEXT: s_mov_b32 s38, 0
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: s_and_b64 s[34:35], vcc, exec
-; SI-NEXT: s_mov_b32 s38, 0
+; SI-NEXT: s_mov_b64 s[34:35], -1
; SI-NEXT: s_cbranch_scc0 .LBB7_2
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s39, 0xf000
@@ -1477,7 +1520,8 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; SI-NEXT: ; implicit-def: $vgpr6
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr7
-; SI-NEXT: s_mov_b64 vcc, 0
+; SI-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; SI-NEXT: s_cbranch_vccnz .LBB7_4
; SI-NEXT: .LBB7_3: ; %T
; SI-NEXT: s_mov_b32 s39, 0xf000
; SI-NEXT: s_mov_b32 s36, s38
@@ -1570,6 +1614,7 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: s_and_b64 s[34:35], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[34:35], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB7_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
@@ -1581,6 +1626,8 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; GFX9-NEXT: s_branch .LBB7_4
; GFX9-NEXT: .LBB7_2:
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GFX9-NEXT: s_cbranch_vccnz .LBB7_4
; GFX9-NEXT: .LBB7_3: ; %T
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[0:1], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -1619,23 +1666,24 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: scratch_load_u8 v4, off, s32
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 1, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB7_3
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB7_3
; GFX11-TRUE16-NEXT: s_branch .LBB7_4
; GFX11-TRUE16-NEXT: .LBB7_2:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB7_4
; GFX11-TRUE16-NEXT: .LBB7_3: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -1665,23 +1713,24 @@ define amdgpu_gfx <8 x i16> @vec_16xi16_extract_8xi16_0(i1 inreg %cond, ptr addr
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: scratch_load_u8 v4, off, s32
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 1, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v4
-; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB7_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB7_3
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB7_3
; GFX11-FAKE16-NEXT: s_branch .LBB7_4
; GFX11-FAKE16-NEXT: .LBB7_2:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB7_4
; GFX11-FAKE16-NEXT: .LBB7_3: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
@@ -1742,9 +1791,10 @@ define amdgpu_gfx <8 x half> @vec_16xf16_extract_8xf16_0(i1 inreg %cond, ptr add
; SI-NEXT: buffer_load_ubyte v4, off, s[0:3], s32
; SI-NEXT: s_waitcnt vmcnt(0)
; SI-NEXT: v_and_b32_e32 v4, 1, v4
+; SI-NEXT: s_mov_b32 s38, 0
; SI-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; SI-NEXT: s_and_b64 s[34:35], vcc, exec
-; SI-NEXT: s_mov_b32 s38, 0
+; SI-NEXT: s_mov_b64 s[34:35], -1
; SI-NEXT: s_cbranch_scc0 .LBB8_2
; SI-NEXT: ; %bb.1: ; %F
; SI-NEXT: s_mov_b32 s39, 0xf000
@@ -1810,7 +1860,8 @@ define amdgpu_gfx <8 x half> @vec_16xf16_extract_8xf16_0(i1 inreg %cond, ptr add
; SI-NEXT: ; implicit-def: $vgpr3
; SI-NEXT: ; implicit-def: $vgpr5
; SI-NEXT: ; implicit-def: $vgpr2
-; SI-NEXT: s_mov_b64 vcc, 0
+; SI-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; SI-NEXT: s_cbranch_vccnz .LBB8_4
; SI-NEXT: .LBB8_3: ; %T
; SI-NEXT: s_mov_b32 s39, 0xf000
; SI-NEXT: s_mov_b32 s36, s38
@@ -1908,6 +1959,7 @@ define amdgpu_gfx <8 x half> @vec_16xf16_extract_8xf16_0(i1 inreg %cond, ptr add
; GFX9-NEXT: v_and_b32_e32 v4, 1, v4
; GFX9-NEXT: v_cmp_eq_u32_e32 vcc, 0, v4
; GFX9-NEXT: s_and_b64 s[34:35], vcc, exec
+; GFX9-NEXT: s_mov_b64 s[34:35], -1
; GFX9-NEXT: s_cbranch_scc0 .LBB8_2
; GFX9-NEXT: ; %bb.1: ; %F
; GFX9-NEXT: global_load_dwordx4 v[4:7], v[2:3], off offset:16 glc
@@ -1919,6 +1971,8 @@ define amdgpu_gfx <8 x half> @vec_16xf16_extract_8xf16_0(i1 inreg %cond, ptr add
; GFX9-NEXT: s_branch .LBB8_4
; GFX9-NEXT: .LBB8_2:
; GFX9-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9_vgpr10_vgpr11
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GFX9-NEXT: s_cbranch_vccnz .LBB8_4
; GFX9-NEXT: .LBB8_3: ; %T
; GFX9-NEXT: global_load_dwordx4 v[2:5], v[0:1], off offset:16 glc
; GFX9-NEXT: s_waitcnt vmcnt(0)
@@ -1955,23 +2009,24 @@ define amdgpu_gfx <8 x half> @vec_16xf16_extract_8xf16_0(i1 inreg %cond, ptr add
; GFX11-TRUE16: ; %bb.0:
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-TRUE16-NEXT: scratch_load_u8 v4, off, s32
-; GFX11-TRUE16-NEXT: s_mov_b32 s0, 0
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: v_and_b32_e32 v4, 1, v4
; GFX11-TRUE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-TRUE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v4
-; GFX11-TRUE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-TRUE16-NEXT: s_mov_b32 s0, -1
; GFX11-TRUE16-NEXT: s_cbranch_scc0 .LBB8_2
; GFX11-TRUE16-NEXT: ; %bb.1: ; %F
; GFX11-TRUE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-TRUE16-NEXT: s_cbranch_vccz .LBB8_3
+; GFX11-TRUE16-NEXT: s_cbranch_execz .LBB8_3
; GFX11-TRUE16-NEXT: s_branch .LBB8_4
; GFX11-TRUE16-NEXT: .LBB8_2:
; GFX11-TRUE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+; GFX11-TRUE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-TRUE16-NEXT: s_cbranch_vccnz .LBB8_4
; GFX11-TRUE16-NEXT: .LBB8_3: ; %T
; GFX11-TRUE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-TRUE16-NEXT: s_waitcnt vmcnt(0)
@@ -2007,23 +2062,24 @@ define amdgpu_gfx <8 x half> @vec_16xf16_extract_8xf16_0(i1 inreg %cond, ptr add
; GFX11-FAKE16: ; %bb.0:
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
; GFX11-FAKE16-NEXT: scratch_load_u8 v4, off, s32
-; GFX11-FAKE16-NEXT: s_mov_b32 s0, 0
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: v_and_b32_e32 v4, 1, v4
; GFX11-FAKE16-NEXT: s_delay_alu instid0(VALU_DEP_1)
; GFX11-FAKE16-NEXT: v_cmp_eq_u32_e32 vcc_lo, 0, v4
-; GFX11-FAKE16-NEXT: s_and_b32 s1, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_and_b32 s0, vcc_lo, exec_lo
+; GFX11-FAKE16-NEXT: s_mov_b32 s0, -1
; GFX11-FAKE16-NEXT: s_cbranch_scc0 .LBB8_2
; GFX11-FAKE16-NEXT: ; %bb.1: ; %F
; GFX11-FAKE16-NEXT: global_load_b128 v[4:7], v[2:3], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[2:3], off glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
-; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
-; GFX11-FAKE16-NEXT: s_cbranch_vccz .LBB8_3
+; GFX11-FAKE16-NEXT: s_cbranch_execz .LBB8_3
; GFX11-FAKE16-NEXT: s_branch .LBB8_4
; GFX11-FAKE16-NEXT: .LBB8_2:
; GFX11-FAKE16-NEXT: ; implicit-def: $vgpr2_vgpr3_vgpr4_vgpr5_vgpr6_vgpr7_vgpr8_vgpr9
+; GFX11-FAKE16-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
+; GFX11-FAKE16-NEXT: s_cbranch_vccnz .LBB8_4
; GFX11-FAKE16-NEXT: .LBB8_3: ; %T
; GFX11-FAKE16-NEXT: global_load_b128 v[2:5], v[0:1], off offset:16 glc dlc
; GFX11-FAKE16-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-phi-regression-issue130646-issue130119.ll b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-phi-regression-issue130646-issue130119.ll
index d03d53a..fc4db39 100644
--- a/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-phi-regression-issue130646-issue130119.ll
+++ b/llvm/test/CodeGen/AMDGPU/fix-sgpr-copies-phi-regression-issue130646-issue130119.ll
@@ -12,9 +12,9 @@ define double @issue130646(i64 %arg) {
; CHECK-NEXT: v_mov_b32_e32 v2, 0
; CHECK-NEXT: v_mov_b32_e32 v3, 0
; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: s_branch .LBB0_2
+; CHECK-NEXT: s_branch .LBB0_3
; CHECK-NEXT: .LBB0_1: ; %for.body.5
-; CHECK-NEXT: ; in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
; CHECK-NEXT: s_lshr_b64 s[6:7], s[4:5], 1
; CHECK-NEXT: v_or_b32_e32 v3, s7, v3
; CHECK-NEXT: v_or_b32_e32 v2, s6, v2
@@ -22,18 +22,24 @@ define double @issue130646(i64 %arg) {
; CHECK-NEXT: s_or_b32 s6, s6, 1
; CHECK-NEXT: v_or3_b32 v3, v3, v1, s7
; CHECK-NEXT: v_or3_b32 v2, v2, v0, s6
-; CHECK-NEXT: s_lshr_b64 s[4:5], s[4:5], 8
-; CHECK-NEXT: s_cbranch_execz .LBB0_4
-; CHECK-NEXT: .LBB0_2: ; %for.body
+; CHECK-NEXT: s_lshr_b64 s[6:7], s[4:5], 8
+; CHECK-NEXT: s_mov_b64 s[8:9], 0
+; CHECK-NEXT: .LBB0_2: ; %Flow
+; CHECK-NEXT: ; in Loop: Header=BB0_3 Depth=1
+; CHECK-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; CHECK-NEXT: s_mov_b64 s[4:5], s[6:7]
+; CHECK-NEXT: s_cbranch_vccz .LBB0_5
+; CHECK-NEXT: .LBB0_3: ; %for.body
; CHECK-NEXT: ; =>This Inner Loop Header: Depth=1
; CHECK-NEXT: s_cmp_eq_u64 s[4:5], 0
-; CHECK-NEXT: v_readfirstlane_b32 s8, v0
-; CHECK-NEXT: v_readfirstlane_b32 s9, v1
+; CHECK-NEXT: v_readfirstlane_b32 s6, v0
+; CHECK-NEXT: v_readfirstlane_b32 s7, v1
+; CHECK-NEXT: s_mov_b64 s[8:9], -1
; CHECK-NEXT: s_cbranch_scc0 .LBB0_1
-; CHECK-NEXT: ; %bb.3:
+; CHECK-NEXT: ; %bb.4: ; in Loop: Header=BB0_3 Depth=1
; CHECK-NEXT: ; implicit-def: $vgpr2_vgpr3
-; CHECK-NEXT: s_mov_b64 s[4:5], s[8:9]
-; CHECK-NEXT: .LBB0_4: ; %for.cond.cleanup
+; CHECK-NEXT: s_branch .LBB0_2
+; CHECK-NEXT: .LBB0_5: ; %for.cond.cleanup
; CHECK-NEXT: v_mov_b32_e32 v0, 0
; CHECK-NEXT: v_mov_b32_e32 v1, 0
; CHECK-NEXT: s_setpc_b64 s[30:31]
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
index ffe0596..efec8bb 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64.ll
@@ -161,6 +161,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB1_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -174,6 +175,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB1_4
; GCN1-NEXT: .LBB1_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB1_4
; GCN1-NEXT: .LBB1_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s5
@@ -213,6 +216,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB1_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -226,6 +230,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB1_4
; GCN2-NEXT: .LBB1_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB1_4
; GCN2-NEXT: .LBB1_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -259,6 +265,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB1_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -270,6 +277,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB1_4
; GFX12-NEXT: .LBB1_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB1_4
; GFX12-NEXT: .LBB1_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -464,6 +473,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB3_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -477,6 +487,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB3_4
; GCN1-NEXT: .LBB3_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB3_4
; GCN1-NEXT: .LBB3_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s13
@@ -518,6 +530,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB3_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -531,6 +544,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB3_4
; GCN2-NEXT: .LBB3_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB3_4
; GCN2-NEXT: .LBB3_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -565,6 +580,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB3_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -576,6 +592,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB3_4
; GFX12-NEXT: .LBB3_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB3_4
; GFX12-NEXT: .LBB3_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -747,6 +765,7 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB5_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -760,6 +779,8 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB5_4
; GCN1-NEXT: .LBB5_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB5_4
; GCN1-NEXT: .LBB5_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s5
@@ -797,6 +818,7 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB5_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -810,6 +832,8 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB5_4
; GCN2-NEXT: .LBB5_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB5_4
; GCN2-NEXT: .LBB5_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -842,6 +866,7 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB5_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -853,6 +878,8 @@ define amdgpu_kernel void @atomic_add_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB5_4
; GFX12-NEXT: .LBB5_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB5_4
; GFX12-NEXT: .LBB5_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -1037,6 +1064,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB7_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -1050,6 +1078,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB7_4
; GCN1-NEXT: .LBB7_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB7_4
; GCN1-NEXT: .LBB7_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s13
@@ -1089,6 +1119,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB7_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -1102,6 +1133,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB7_4
; GCN2-NEXT: .LBB7_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB7_4
; GCN2-NEXT: .LBB7_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -1135,6 +1168,7 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB7_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -1146,6 +1180,8 @@ define amdgpu_kernel void @atomic_add_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB7_4
; GFX12-NEXT: .LBB7_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB7_4
; GFX12-NEXT: .LBB7_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -1321,6 +1357,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB9_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -1334,6 +1371,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB9_4
; GCN1-NEXT: .LBB9_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB9_4
; GCN1-NEXT: .LBB9_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -1372,6 +1411,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB9_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -1385,6 +1425,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB9_4
; GCN2-NEXT: .LBB9_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB9_4
; GCN2-NEXT: .LBB9_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -1417,6 +1459,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB9_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -1428,6 +1471,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB9_4
; GFX12-NEXT: .LBB9_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB9_4
; GFX12-NEXT: .LBB9_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -1618,6 +1663,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB11_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -1631,6 +1677,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB11_4
; GCN1-NEXT: .LBB11_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB11_4
; GCN1-NEXT: .LBB11_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -1671,6 +1719,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB11_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -1684,6 +1733,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB11_4
; GCN2-NEXT: .LBB11_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB11_4
; GCN2-NEXT: .LBB11_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -1717,6 +1768,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB11_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -1728,6 +1780,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB11_4
; GFX12-NEXT: .LBB11_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB11_4
; GFX12-NEXT: .LBB11_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -1895,6 +1949,7 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB13_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -1908,6 +1963,8 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB13_4
; GCN1-NEXT: .LBB13_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB13_4
; GCN1-NEXT: .LBB13_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -1944,6 +2001,7 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB13_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -1957,6 +2015,8 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB13_4
; GCN2-NEXT: .LBB13_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB13_4
; GCN2-NEXT: .LBB13_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -1988,6 +2048,7 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB13_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -1999,6 +2060,8 @@ define amdgpu_kernel void @atomic_and_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB13_4
; GFX12-NEXT: .LBB13_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB13_4
; GFX12-NEXT: .LBB13_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -2179,6 +2242,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB15_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -2192,6 +2256,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB15_4
; GCN1-NEXT: .LBB15_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB15_4
; GCN1-NEXT: .LBB15_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -2230,6 +2296,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB15_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -2243,6 +2310,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB15_4
; GCN2-NEXT: .LBB15_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB15_4
; GCN2-NEXT: .LBB15_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -2275,6 +2344,7 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB15_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -2286,6 +2356,8 @@ define amdgpu_kernel void @atomic_and_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB15_4
; GFX12-NEXT: .LBB15_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB15_4
; GFX12-NEXT: .LBB15_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -2463,6 +2535,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB17_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -2476,6 +2549,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB17_4
; GCN1-NEXT: .LBB17_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB17_4
; GCN1-NEXT: .LBB17_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s5
@@ -2515,6 +2590,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB17_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -2528,6 +2604,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB17_4
; GCN2-NEXT: .LBB17_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB17_4
; GCN2-NEXT: .LBB17_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -2561,6 +2639,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB17_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -2572,6 +2651,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB17_4
; GFX12-NEXT: .LBB17_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB17_4
; GFX12-NEXT: .LBB17_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -2766,6 +2847,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB19_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -2779,6 +2861,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB19_4
; GCN1-NEXT: .LBB19_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB19_4
; GCN1-NEXT: .LBB19_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s13
@@ -2820,6 +2904,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB19_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -2833,6 +2918,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB19_4
; GCN2-NEXT: .LBB19_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB19_4
; GCN2-NEXT: .LBB19_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -2867,6 +2954,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB19_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -2878,6 +2966,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB19_4
; GFX12-NEXT: .LBB19_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB19_4
; GFX12-NEXT: .LBB19_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -3049,6 +3139,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB21_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -3062,6 +3153,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB21_4
; GCN1-NEXT: .LBB21_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB21_4
; GCN1-NEXT: .LBB21_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s5
@@ -3099,6 +3192,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB21_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -3112,6 +3206,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB21_4
; GCN2-NEXT: .LBB21_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB21_4
; GCN2-NEXT: .LBB21_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -3144,6 +3240,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB21_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -3155,6 +3252,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB21_4
; GFX12-NEXT: .LBB21_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB21_4
; GFX12-NEXT: .LBB21_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -3339,6 +3438,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB23_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -3352,6 +3452,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB23_4
; GCN1-NEXT: .LBB23_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB23_4
; GCN1-NEXT: .LBB23_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s13
@@ -3391,6 +3493,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB23_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -3404,6 +3507,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB23_4
; GCN2-NEXT: .LBB23_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB23_4
; GCN2-NEXT: .LBB23_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -3437,6 +3542,7 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB23_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -3448,6 +3554,8 @@ define amdgpu_kernel void @atomic_sub_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB23_4
; GFX12-NEXT: .LBB23_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB23_4
; GFX12-NEXT: .LBB23_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -3626,6 +3734,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB25_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -3638,6 +3747,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB25_4
; GCN1-NEXT: .LBB25_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB25_4
; GCN1-NEXT: .LBB25_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -3679,6 +3790,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB25_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -3691,6 +3803,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB25_4
; GCN2-NEXT: .LBB25_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB25_4
; GCN2-NEXT: .LBB25_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -3726,6 +3840,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB25_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -3737,6 +3852,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB25_4
; GFX12-NEXT: .LBB25_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB25_4
; GFX12-NEXT: .LBB25_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -3931,6 +4048,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB27_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -3943,6 +4061,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB27_4
; GCN1-NEXT: .LBB27_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB27_4
; GCN1-NEXT: .LBB27_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -3986,6 +4106,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB27_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -3998,6 +4119,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB27_4
; GCN2-NEXT: .LBB27_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB27_4
; GCN2-NEXT: .LBB27_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -4034,6 +4157,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB27_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -4045,6 +4169,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB27_4
; GFX12-NEXT: .LBB27_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB27_4
; GFX12-NEXT: .LBB27_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -4216,6 +4342,7 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB29_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -4228,6 +4355,8 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB29_4
; GCN1-NEXT: .LBB29_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB29_4
; GCN1-NEXT: .LBB29_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -4267,6 +4396,7 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB29_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -4279,6 +4409,8 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB29_4
; GCN2-NEXT: .LBB29_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB29_4
; GCN2-NEXT: .LBB29_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -4313,6 +4445,7 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB29_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -4324,6 +4457,8 @@ define amdgpu_kernel void @atomic_max_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB29_4
; GFX12-NEXT: .LBB29_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB29_4
; GFX12-NEXT: .LBB29_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -4508,6 +4643,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB31_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -4520,6 +4656,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB31_4
; GCN1-NEXT: .LBB31_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB31_4
; GCN1-NEXT: .LBB31_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -4561,6 +4699,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB31_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -4573,6 +4712,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB31_4
; GCN2-NEXT: .LBB31_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB31_4
; GCN2-NEXT: .LBB31_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -4608,6 +4749,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB31_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -4619,6 +4761,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB31_4
; GFX12-NEXT: .LBB31_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB31_4
; GFX12-NEXT: .LBB31_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -4797,6 +4941,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB33_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -4809,6 +4954,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_branch .LBB33_4
; GCN1-NEXT: .LBB33_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB33_4
; GCN1-NEXT: .LBB33_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -4850,6 +4997,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB33_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -4862,6 +5010,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_branch .LBB33_4
; GCN2-NEXT: .LBB33_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB33_4
; GCN2-NEXT: .LBB33_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -4897,6 +5047,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB33_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -4908,6 +5059,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_branch .LBB33_4
; GFX12-NEXT: .LBB33_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB33_4
; GFX12-NEXT: .LBB33_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -5102,6 +5255,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB35_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -5114,6 +5268,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_branch .LBB35_4
; GCN1-NEXT: .LBB35_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB35_4
; GCN1-NEXT: .LBB35_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -5157,6 +5313,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB35_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -5169,6 +5326,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_branch .LBB35_4
; GCN2-NEXT: .LBB35_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB35_4
; GCN2-NEXT: .LBB35_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -5205,6 +5364,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB35_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -5216,6 +5376,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: s_branch .LBB35_4
; GFX12-NEXT: .LBB35_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB35_4
; GFX12-NEXT: .LBB35_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -5387,6 +5549,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB37_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -5399,6 +5562,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB37_4
; GCN1-NEXT: .LBB37_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB37_4
; GCN1-NEXT: .LBB37_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -5438,6 +5603,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB37_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -5450,6 +5616,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB37_4
; GCN2-NEXT: .LBB37_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB37_4
; GCN2-NEXT: .LBB37_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -5484,6 +5652,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB37_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -5495,6 +5664,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB37_4
; GFX12-NEXT: .LBB37_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB37_4
; GFX12-NEXT: .LBB37_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -5679,6 +5850,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB39_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -5691,6 +5863,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_branch .LBB39_4
; GCN1-NEXT: .LBB39_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB39_4
; GCN1-NEXT: .LBB39_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -5732,6 +5906,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB39_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -5744,6 +5919,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_branch .LBB39_4
; GCN2-NEXT: .LBB39_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB39_4
; GCN2-NEXT: .LBB39_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -5779,6 +5956,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB39_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -5790,6 +5968,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_branch .LBB39_4
; GFX12-NEXT: .LBB39_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB39_4
; GFX12-NEXT: .LBB39_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -5968,6 +6148,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB41_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -5980,6 +6161,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB41_4
; GCN1-NEXT: .LBB41_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB41_4
; GCN1-NEXT: .LBB41_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -6021,6 +6204,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB41_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -6033,6 +6217,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB41_4
; GCN2-NEXT: .LBB41_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB41_4
; GCN2-NEXT: .LBB41_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -6068,6 +6254,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB41_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -6079,6 +6266,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB41_4
; GFX12-NEXT: .LBB41_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB41_4
; GFX12-NEXT: .LBB41_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -6273,6 +6462,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB43_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -6285,6 +6475,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB43_4
; GCN1-NEXT: .LBB43_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB43_4
; GCN1-NEXT: .LBB43_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -6328,6 +6520,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB43_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -6340,6 +6533,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB43_4
; GCN2-NEXT: .LBB43_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB43_4
; GCN2-NEXT: .LBB43_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -6376,6 +6571,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB43_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -6387,6 +6583,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB43_4
; GFX12-NEXT: .LBB43_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB43_4
; GFX12-NEXT: .LBB43_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -6558,6 +6756,7 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB45_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -6570,6 +6769,8 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB45_4
; GCN1-NEXT: .LBB45_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB45_4
; GCN1-NEXT: .LBB45_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -6609,6 +6810,7 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB45_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -6621,6 +6823,8 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB45_4
; GCN2-NEXT: .LBB45_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB45_4
; GCN2-NEXT: .LBB45_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -6655,6 +6859,7 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB45_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -6666,6 +6871,8 @@ define amdgpu_kernel void @atomic_min_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB45_4
; GFX12-NEXT: .LBB45_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB45_4
; GFX12-NEXT: .LBB45_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -6850,6 +7057,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB47_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -6862,6 +7070,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB47_4
; GCN1-NEXT: .LBB47_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB47_4
; GCN1-NEXT: .LBB47_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -6903,6 +7113,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB47_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -6915,6 +7126,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB47_4
; GCN2-NEXT: .LBB47_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB47_4
; GCN2-NEXT: .LBB47_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -6950,6 +7163,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB47_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -6961,6 +7175,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB47_4
; GFX12-NEXT: .LBB47_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB47_4
; GFX12-NEXT: .LBB47_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -7139,6 +7355,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB49_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -7151,6 +7368,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_branch .LBB49_4
; GCN1-NEXT: .LBB49_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB49_4
; GCN1-NEXT: .LBB49_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -7192,6 +7411,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB49_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -7204,6 +7424,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_branch .LBB49_4
; GCN2-NEXT: .LBB49_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB49_4
; GCN2-NEXT: .LBB49_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -7239,6 +7461,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB49_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -7250,6 +7473,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_branch .LBB49_4
; GFX12-NEXT: .LBB49_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB49_4
; GFX12-NEXT: .LBB49_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -7444,6 +7669,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB51_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -7456,6 +7682,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_branch .LBB51_4
; GCN1-NEXT: .LBB51_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB51_4
; GCN1-NEXT: .LBB51_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -7499,6 +7727,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB51_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -7511,6 +7740,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_branch .LBB51_4
; GCN2-NEXT: .LBB51_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB51_4
; GCN2-NEXT: .LBB51_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -7547,6 +7778,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB51_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -7558,6 +7790,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: s_branch .LBB51_4
; GFX12-NEXT: .LBB51_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB51_4
; GFX12-NEXT: .LBB51_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -7729,6 +7963,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB53_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -7741,6 +7976,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB53_4
; GCN1-NEXT: .LBB53_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB53_4
; GCN1-NEXT: .LBB53_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s4
@@ -7780,6 +8017,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB53_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -7792,6 +8030,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB53_4
; GCN2-NEXT: .LBB53_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB53_4
; GCN2-NEXT: .LBB53_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -7826,6 +8066,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB53_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -7837,6 +8078,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB53_4
; GFX12-NEXT: .LBB53_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB53_4
; GFX12-NEXT: .LBB53_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -8021,6 +8264,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB55_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -8033,6 +8277,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_branch .LBB55_4
; GCN1-NEXT: .LBB55_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB55_4
; GCN1-NEXT: .LBB55_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -8074,6 +8320,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB55_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -8086,6 +8333,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_branch .LBB55_4
; GCN2-NEXT: .LBB55_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB55_4
; GCN2-NEXT: .LBB55_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -8121,6 +8370,7 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB55_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -8132,6 +8382,8 @@ define amdgpu_kernel void @atomic_umin_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_branch .LBB55_4
; GFX12-NEXT: .LBB55_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB55_4
; GFX12-NEXT: .LBB55_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -8307,6 +8559,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB57_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -8320,6 +8573,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GCN1-NEXT: s_branch .LBB57_4
; GCN1-NEXT: .LBB57_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB57_4
; GCN1-NEXT: .LBB57_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -8358,6 +8613,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB57_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -8371,6 +8627,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GCN2-NEXT: s_branch .LBB57_4
; GCN2-NEXT: .LBB57_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB57_4
; GCN2-NEXT: .LBB57_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -8403,6 +8661,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB57_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -8414,6 +8673,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_offset(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: s_branch .LBB57_4
; GFX12-NEXT: .LBB57_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB57_4
; GFX12-NEXT: .LBB57_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -8604,6 +8865,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB59_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -8617,6 +8879,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB59_4
; GCN1-NEXT: .LBB59_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB59_4
; GCN1-NEXT: .LBB59_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -8657,6 +8921,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB59_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -8670,6 +8935,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB59_4
; GCN2-NEXT: .LBB59_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB59_4
; GCN2-NEXT: .LBB59_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -8703,6 +8970,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB59_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -8714,6 +8982,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB59_4
; GFX12-NEXT: .LBB59_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB59_4
; GFX12-NEXT: .LBB59_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -8881,6 +9151,7 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB61_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -8894,6 +9165,8 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB61_4
; GCN1-NEXT: .LBB61_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB61_4
; GCN1-NEXT: .LBB61_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -8930,6 +9203,7 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB61_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -8943,6 +9217,8 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB61_4
; GCN2-NEXT: .LBB61_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB61_4
; GCN2-NEXT: .LBB61_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -8974,6 +9250,7 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB61_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -8985,6 +9262,8 @@ define amdgpu_kernel void @atomic_or_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB61_4
; GFX12-NEXT: .LBB61_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB61_4
; GFX12-NEXT: .LBB61_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -9165,6 +9444,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB63_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -9178,6 +9458,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GCN1-NEXT: s_branch .LBB63_4
; GCN1-NEXT: .LBB63_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB63_4
; GCN1-NEXT: .LBB63_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -9216,6 +9498,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB63_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -9229,6 +9512,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GCN2-NEXT: s_branch .LBB63_4
; GCN2-NEXT: .LBB63_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB63_4
; GCN2-NEXT: .LBB63_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -9261,6 +9546,7 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB63_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -9272,6 +9558,8 @@ define amdgpu_kernel void @atomic_or_i64_ret_addr64(ptr %out, ptr %out2, i64 %in
; GFX12-NEXT: s_branch .LBB63_4
; GFX12-NEXT: .LBB63_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB63_4
; GFX12-NEXT: .LBB63_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -9687,6 +9975,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB67_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -9700,6 +9989,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_branch .LBB67_4
; GCN1-NEXT: .LBB67_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB67_4
; GCN1-NEXT: .LBB67_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s4
@@ -9737,6 +10028,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB67_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -9750,6 +10042,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_branch .LBB67_4
; GCN2-NEXT: .LBB67_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB67_4
; GCN2-NEXT: .LBB67_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -9781,6 +10075,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB67_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -9792,6 +10087,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_offset(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_branch .LBB67_4
; GFX12-NEXT: .LBB67_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB67_4
; GFX12-NEXT: .LBB67_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
@@ -9970,6 +10267,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB69_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -9983,6 +10281,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_branch .LBB69_4
; GCN1-NEXT: .LBB69_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB69_4
; GCN1-NEXT: .LBB69_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s12
@@ -10022,6 +10322,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB69_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -10035,6 +10336,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_branch .LBB69_4
; GCN2-NEXT: .LBB69_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB69_4
; GCN2-NEXT: .LBB69_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -10067,6 +10370,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB69_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -10078,6 +10382,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64_offset(ptr %out, ptr %out2
; GFX12-NEXT: s_branch .LBB69_4
; GFX12-NEXT: .LBB69_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB69_4
; GFX12-NEXT: .LBB69_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
@@ -10233,6 +10539,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB71_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -10246,6 +10553,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB71_4
; GCN1-NEXT: .LBB71_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB71_4
; GCN1-NEXT: .LBB71_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s4
@@ -10281,6 +10590,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB71_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -10294,6 +10604,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB71_4
; GCN2-NEXT: .LBB71_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB71_4
; GCN2-NEXT: .LBB71_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -10324,6 +10636,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB71_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -10335,6 +10648,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB71_4
; GFX12-NEXT: .LBB71_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB71_4
; GFX12-NEXT: .LBB71_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
@@ -10503,6 +10818,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB73_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -10516,6 +10832,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_branch .LBB73_4
; GCN1-NEXT: .LBB73_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB73_4
; GCN1-NEXT: .LBB73_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s12
@@ -10553,6 +10871,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB73_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -10566,6 +10885,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_branch .LBB73_4
; GCN2-NEXT: .LBB73_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB73_4
; GCN2-NEXT: .LBB73_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -10597,6 +10918,7 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB73_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -10608,6 +10930,8 @@ define amdgpu_kernel void @atomic_xchg_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GFX12-NEXT: s_branch .LBB73_4
; GFX12-NEXT: .LBB73_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB73_4
; GFX12-NEXT: .LBB73_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: v_dual_mov_b32 v2, s4 :: v_dual_mov_b32 v3, s5
@@ -10781,6 +11105,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB75_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -10794,6 +11119,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB75_4
; GCN1-NEXT: .LBB75_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB75_4
; GCN1-NEXT: .LBB75_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -10832,6 +11159,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB75_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -10845,6 +11173,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB75_4
; GCN2-NEXT: .LBB75_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB75_4
; GCN2-NEXT: .LBB75_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -10877,6 +11207,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB75_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -10888,6 +11219,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB75_4
; GFX12-NEXT: .LBB75_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB75_4
; GFX12-NEXT: .LBB75_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -11078,6 +11411,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB77_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -11091,6 +11425,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB77_4
; GCN1-NEXT: .LBB77_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB77_4
; GCN1-NEXT: .LBB77_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -11131,6 +11467,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB77_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -11144,6 +11481,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB77_4
; GCN2-NEXT: .LBB77_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB77_4
; GCN2-NEXT: .LBB77_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -11177,6 +11516,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB77_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -11188,6 +11528,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB77_4
; GFX12-NEXT: .LBB77_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB77_4
; GFX12-NEXT: .LBB77_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -11355,6 +11697,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB79_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -11368,6 +11711,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB79_4
; GCN1-NEXT: .LBB79_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB79_4
; GCN1-NEXT: .LBB79_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -11404,6 +11749,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB79_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -11417,6 +11763,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB79_4
; GCN2-NEXT: .LBB79_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB79_4
; GCN2-NEXT: .LBB79_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -11448,6 +11796,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB79_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -11459,6 +11808,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB79_4
; GFX12-NEXT: .LBB79_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB79_4
; GFX12-NEXT: .LBB79_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -11639,6 +11990,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB81_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -11652,6 +12004,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB81_4
; GCN1-NEXT: .LBB81_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB81_4
; GCN1-NEXT: .LBB81_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -11690,6 +12044,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB81_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -11703,6 +12058,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB81_4
; GCN2-NEXT: .LBB81_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB81_4
; GCN2-NEXT: .LBB81_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -11735,6 +12092,7 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB81_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -11746,6 +12104,8 @@ define amdgpu_kernel void @atomic_xor_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB81_4
; GFX12-NEXT: .LBB81_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB81_4
; GFX12-NEXT: .LBB81_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -12496,6 +12856,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB92_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v5, s1
@@ -12511,6 +12872,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
; GCN1-NEXT: s_branch .LBB92_4
; GCN1-NEXT: .LBB92_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB92_4
; GCN1-NEXT: .LBB92_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -12550,6 +12913,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB92_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v5, s1
@@ -12565,6 +12929,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
; GCN2-NEXT: s_branch .LBB92_4
; GCN2-NEXT: .LBB92_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB92_4
; GCN2-NEXT: .LBB92_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -12597,6 +12963,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
; GFX12-NEXT: s_cmp_eq_u32 s1, s9
; GFX12-NEXT: s_cselect_b32 s8, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX12-NEXT: s_mov_b32 s8, -1
; GFX12-NEXT: s_cbranch_vccz .LBB92_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
@@ -12609,6 +12976,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_offset(ptr %out, ptr %out2, i6
; GFX12-NEXT: s_branch .LBB92_4
; GFX12-NEXT: .LBB92_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX12-NEXT: s_cbranch_vccnz .LBB92_4
; GFX12-NEXT: .LBB92_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -12808,6 +13177,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
; GCN1-NEXT: s_cmp_eq_u32 s3, s4
; GCN1-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN1-NEXT: s_mov_b64 s[4:5], -1
; GCN1-NEXT: s_cbranch_vccz .LBB94_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v5, s3
@@ -12823,6 +13193,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
; GCN1-NEXT: s_branch .LBB94_4
; GCN1-NEXT: .LBB94_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_vccnz .LBB94_4
; GCN1-NEXT: .LBB94_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[4:5], s[2:3], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -12866,6 +13238,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
; GCN2-NEXT: s_cmp_eq_u32 s3, s4
; GCN2-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN2-NEXT: s_mov_b64 s[4:5], -1
; GCN2-NEXT: s_cbranch_vccz .LBB94_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v5, s3
@@ -12881,6 +13254,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
; GCN2-NEXT: s_branch .LBB94_4
; GCN2-NEXT: .LBB94_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_vccnz .LBB94_4
; GCN2-NEXT: .LBB94_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[2:3], 0
; GCN2-NEXT: s_cselect_b32 s2, s2, -1
@@ -12918,6 +13293,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
; GFX12-NEXT: s_cmp_eq_u32 s3, s5
; GFX12-NEXT: s_cselect_b32 s4, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX12-NEXT: s_mov_b32 s4, -1
; GFX12-NEXT: s_cbranch_vccz .LBB94_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
@@ -12930,6 +13306,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64_offset(ptr %out, ptr %o
; GFX12-NEXT: s_branch .LBB94_4
; GFX12-NEXT: .LBB94_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_vccnz .LBB94_4
; GFX12-NEXT: .LBB94_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX12-NEXT: s_cselect_b32 s2, s2, -1
@@ -13112,6 +13490,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
; GCN1-NEXT: s_cmp_eq_u32 s9, s0
; GCN1-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GCN1-NEXT: s_mov_b64 s[0:1], -1
; GCN1-NEXT: s_cbranch_vccz .LBB96_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v4, s8
@@ -13127,6 +13506,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
; GCN1-NEXT: s_branch .LBB96_4
; GCN1-NEXT: .LBB96_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GCN1-NEXT: s_cbranch_vccnz .LBB96_4
; GCN1-NEXT: .LBB96_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[0:1], s[8:9], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -13164,6 +13545,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
; GCN2-NEXT: s_cmp_eq_u32 s9, s0
; GCN2-NEXT: s_cselect_b64 s[0:1], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GCN2-NEXT: s_mov_b64 s[0:1], -1
; GCN2-NEXT: s_cbranch_vccz .LBB96_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v4, s8
@@ -13179,6 +13561,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
; GCN2-NEXT: s_branch .LBB96_4
; GCN2-NEXT: .LBB96_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GCN2-NEXT: s_cbranch_vccnz .LBB96_4
; GCN2-NEXT: .LBB96_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[8:9], 0
; GCN2-NEXT: s_cselect_b32 s0, s8, -1
@@ -13210,6 +13594,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
; GFX12-NEXT: s_cselect_b32 s8, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX12-NEXT: s_mov_b32 s8, -1
; GFX12-NEXT: s_cbranch_vccz .LBB96_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
@@ -13222,6 +13607,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret(ptr %out, ptr %out2, i64 %in,
; GFX12-NEXT: s_branch .LBB96_4
; GFX12-NEXT: .LBB96_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX12-NEXT: s_cbranch_vccnz .LBB96_4
; GFX12-NEXT: .LBB96_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -13412,6 +13799,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
; GCN1-NEXT: s_cmp_eq_u32 s3, s6
; GCN1-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN1-NEXT: s_mov_b64 s[4:5], -1
; GCN1-NEXT: s_cbranch_vccz .LBB98_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v5, s3
@@ -13427,6 +13815,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
; GCN1-NEXT: s_branch .LBB98_4
; GCN1-NEXT: .LBB98_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_vccnz .LBB98_4
; GCN1-NEXT: .LBB98_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[4:5], s[2:3], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -13468,6 +13858,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
; GCN2-NEXT: s_cmp_eq_u32 s3, s6
; GCN2-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN2-NEXT: s_mov_b64 s[4:5], -1
; GCN2-NEXT: s_cbranch_vccz .LBB98_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v5, s3
@@ -13483,6 +13874,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
; GCN2-NEXT: s_branch .LBB98_4
; GCN2-NEXT: .LBB98_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_vccnz .LBB98_4
; GCN2-NEXT: .LBB98_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[2:3], 0
; GCN2-NEXT: s_cselect_b32 s2, s2, -1
@@ -13519,6 +13912,7 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
; GFX12-NEXT: s_cselect_b32 s4, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX12-NEXT: s_mov_b32 s4, -1
; GFX12-NEXT: s_cbranch_vccz .LBB98_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s12 :: v_dual_mov_b32 v1, s13
@@ -13531,6 +13925,8 @@ define amdgpu_kernel void @atomic_cmpxchg_i64_ret_addr64(ptr %out, ptr %out2, i6
; GFX12-NEXT: s_branch .LBB98_4
; GFX12-NEXT: .LBB98_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s4
+; GFX12-NEXT: s_cbranch_vccnz .LBB98_4
; GFX12-NEXT: .LBB98_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[2:3], 0
; GFX12-NEXT: s_cselect_b32 s2, s2, -1
@@ -14129,6 +14525,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB108_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -14142,6 +14539,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB108_4
; GCN1-NEXT: .LBB108_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB108_4
; GCN1-NEXT: .LBB108_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -14183,6 +14582,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB108_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -14196,6 +14596,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB108_4
; GCN2-NEXT: .LBB108_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB108_4
; GCN2-NEXT: .LBB108_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -14231,6 +14633,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB108_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -14242,6 +14645,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB108_4
; GFX12-NEXT: .LBB108_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB108_4
; GFX12-NEXT: .LBB108_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -14446,6 +14851,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB110_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -14459,6 +14865,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB110_4
; GCN1-NEXT: .LBB110_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB110_4
; GCN1-NEXT: .LBB110_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -14502,6 +14910,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB110_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -14515,6 +14924,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB110_4
; GCN2-NEXT: .LBB110_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB110_4
; GCN2-NEXT: .LBB110_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -14551,6 +14962,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB110_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -14562,6 +14974,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB110_4
; GFX12-NEXT: .LBB110_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB110_4
; GFX12-NEXT: .LBB110_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -14743,6 +15157,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s1, s6
; GCN1-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_mov_b64 s[6:7], -1
; GCN1-NEXT: s_cbranch_vccz .LBB112_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -14756,6 +15171,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB112_4
; GCN1-NEXT: .LBB112_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN1-NEXT: s_cbranch_vccnz .LBB112_4
; GCN1-NEXT: .LBB112_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[6:7], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[6:7], s[6:7], exec
@@ -14795,6 +15212,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s1, s6
; GCN2-NEXT: s_cselect_b64 s[6:7], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_mov_b64 s[6:7], -1
; GCN2-NEXT: s_cbranch_vccz .LBB112_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -14808,6 +15226,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB112_4
; GCN2-NEXT: .LBB112_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; GCN2-NEXT: s_cbranch_vccnz .LBB112_4
; GCN2-NEXT: .LBB112_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -14842,6 +15262,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB112_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -14853,6 +15274,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB112_4
; GFX12-NEXT: .LBB112_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB112_4
; GFX12-NEXT: .LBB112_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -15047,6 +15470,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB114_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -15060,6 +15484,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB114_4
; GCN1-NEXT: .LBB114_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB114_4
; GCN1-NEXT: .LBB114_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: s_and_b64 s[2:3], s[2:3], exec
@@ -15101,6 +15527,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB114_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -15114,6 +15541,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB114_4
; GCN2-NEXT: .LBB114_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB114_4
; GCN2-NEXT: .LBB114_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -15149,6 +15578,7 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB114_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -15160,6 +15590,8 @@ define amdgpu_kernel void @atomic_inc_i64_ret_incr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB114_4
; GFX12-NEXT: .LBB114_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB114_4
; GFX12-NEXT: .LBB114_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s0, s0, -1
@@ -15357,6 +15789,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s3, s6
; GCN1-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN1-NEXT: s_mov_b64 s[4:5], -1
; GCN1-NEXT: s_cbranch_vccz .LBB116_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s2
@@ -15370,6 +15803,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB116_4
; GCN1-NEXT: .LBB116_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN1-NEXT: s_cbranch_vccnz .LBB116_4
; GCN1-NEXT: .LBB116_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[4:5], s[2:3], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s1
@@ -15415,6 +15850,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s3, s6
; GCN2-NEXT: s_cselect_b64 s[4:5], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN2-NEXT: s_mov_b64 s[4:5], -1
; GCN2-NEXT: s_cbranch_vccz .LBB116_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s2
@@ -15428,6 +15864,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB116_4
; GCN2-NEXT: .LBB116_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN2-NEXT: s_cbranch_vccnz .LBB116_4
; GCN2-NEXT: .LBB116_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[2:3], 0
; GCN2-NEXT: s_cselect_b32 s2, s2, -1
@@ -15467,6 +15905,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cmp_eq_u32 s1, s7
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB116_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -15478,6 +15917,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_offset(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB116_4
; GFX12-NEXT: .LBB116_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB116_4
; GFX12-NEXT: .LBB116_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s6, s0, -1
@@ -15694,6 +16135,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB118_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -15707,6 +16149,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB118_4
; GCN1-NEXT: .LBB118_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB118_4
; GCN1-NEXT: .LBB118_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -15754,6 +16198,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB118_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -15767,6 +16212,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB118_4
; GCN2-NEXT: .LBB118_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB118_4
; GCN2-NEXT: .LBB118_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -15807,6 +16254,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB118_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -15818,6 +16266,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64_offset(ptr %out, ptr %out2,
; GFX12-NEXT: s_branch .LBB118_4
; GFX12-NEXT: .LBB118_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB118_4
; GFX12-NEXT: .LBB118_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s6, s0, -1
@@ -16011,6 +16461,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_cmp_eq_u32 s9, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB120_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s8
@@ -16024,6 +16475,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN1-NEXT: s_branch .LBB120_4
; GCN1-NEXT: .LBB120_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB120_4
; GCN1-NEXT: .LBB120_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[8:9], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s1
@@ -16067,6 +16520,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_cmp_eq_u32 s9, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB120_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s8
@@ -16080,6 +16534,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GCN2-NEXT: s_branch .LBB120_4
; GCN2-NEXT: .LBB120_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB120_4
; GCN2-NEXT: .LBB120_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[8:9], 0
; GCN2-NEXT: s_cselect_b32 s2, s8, -1
@@ -16118,6 +16574,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB120_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -16129,6 +16586,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret(ptr %out, ptr %out2, i64 %in) {
; GFX12-NEXT: s_branch .LBB120_4
; GFX12-NEXT: .LBB120_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB120_4
; GFX12-NEXT: .LBB120_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s6, s0, -1
@@ -16335,6 +16794,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB122_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s0
@@ -16348,6 +16808,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB122_4
; GCN1-NEXT: .LBB122_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccnz .LBB122_4
; GCN1-NEXT: .LBB122_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -16393,6 +16855,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB122_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s0
@@ -16406,6 +16869,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB122_4
; GCN2-NEXT: .LBB122_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccnz .LBB122_4
; GCN2-NEXT: .LBB122_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -16445,6 +16910,7 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_cselect_b32 s6, -1, 0
; GFX12-NEXT: s_delay_alu instid0(SALU_CYCLE_1)
; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_mov_b32 s6, -1
; GFX12-NEXT: s_cbranch_vccz .LBB122_2
; GFX12-NEXT: ; %bb.1: ; %atomicrmw.global
; GFX12-NEXT: v_dual_mov_b32 v0, s0 :: v_dual_mov_b32 v1, s1
@@ -16456,6 +16922,8 @@ define amdgpu_kernel void @atomic_dec_i64_ret_decr64(ptr %out, ptr %out2, i64 %i
; GFX12-NEXT: s_branch .LBB122_4
; GFX12-NEXT: .LBB122_2:
; GFX12-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_cbranch_vccnz .LBB122_4
; GFX12-NEXT: .LBB122_3: ; %atomicrmw.private
; GFX12-NEXT: s_cmp_lg_u64 s[0:1], 0
; GFX12-NEXT: s_cselect_b32 s6, s0, -1
diff --git a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
index 23dfe2f..a67fcc4 100644
--- a/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
+++ b/llvm/test/CodeGen/AMDGPU/flat_atomics_i64_system.ll
@@ -706,6 +706,7 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB6_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -719,6 +720,8 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB6_4
; GCN1-NEXT: .LBB6_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB6_4
; GCN1-NEXT: .LBB6_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s6
@@ -745,6 +748,7 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB6_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -758,6 +762,8 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB6_4
; GCN2-NEXT: .LBB6_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB6_4
; GCN2-NEXT: .LBB6_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -781,6 +787,7 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB6_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -794,6 +801,8 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB6_4
; GCN3-NEXT: .LBB6_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB6_4
; GCN3-NEXT: .LBB6_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -823,6 +832,7 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB7_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -836,6 +846,8 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_branch .LBB7_4
; GCN1-NEXT: .LBB7_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB7_4
; GCN1-NEXT: .LBB7_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s6
@@ -864,6 +876,7 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB7_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -877,6 +890,8 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_branch .LBB7_4
; GCN2-NEXT: .LBB7_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB7_4
; GCN2-NEXT: .LBB7_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -902,6 +917,7 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB7_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -915,6 +931,8 @@ define amdgpu_gfx i64 @flat_atomic_xchg_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_branch .LBB7_4
; GCN3-NEXT: .LBB7_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB7_4
; GCN3-NEXT: .LBB7_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -1866,6 +1884,7 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_scalar(ptr inreg %ptr, double
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB16_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -1879,6 +1898,8 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_scalar(ptr inreg %ptr, double
; GCN1-NEXT: s_branch .LBB16_4
; GCN1-NEXT: .LBB16_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB16_4
; GCN1-NEXT: .LBB16_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s6
@@ -1905,6 +1926,7 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_scalar(ptr inreg %ptr, double
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB16_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -1918,6 +1940,8 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_scalar(ptr inreg %ptr, double
; GCN2-NEXT: s_branch .LBB16_4
; GCN2-NEXT: .LBB16_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB16_4
; GCN2-NEXT: .LBB16_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -1941,6 +1965,7 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_scalar(ptr inreg %ptr, double
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB16_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -1954,6 +1979,8 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_scalar(ptr inreg %ptr, double
; GCN3-NEXT: s_branch .LBB16_4
; GCN3-NEXT: .LBB16_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB16_4
; GCN3-NEXT: .LBB16_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -1983,6 +2010,7 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_offset_scalar(ptr inreg %out,
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB17_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -1996,6 +2024,8 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_offset_scalar(ptr inreg %out,
; GCN1-NEXT: s_branch .LBB17_4
; GCN1-NEXT: .LBB17_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB17_4
; GCN1-NEXT: .LBB17_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s6
@@ -2024,6 +2054,7 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_offset_scalar(ptr inreg %out,
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB17_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -2037,6 +2068,8 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_offset_scalar(ptr inreg %out,
; GCN2-NEXT: s_branch .LBB17_4
; GCN2-NEXT: .LBB17_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB17_4
; GCN2-NEXT: .LBB17_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -2062,6 +2095,7 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_offset_scalar(ptr inreg %out,
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB17_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -2075,6 +2109,8 @@ define amdgpu_gfx double @flat_atomic_xchg_f64_ret_offset_scalar(ptr inreg %out,
; GCN3-NEXT: s_branch .LBB17_4
; GCN3-NEXT: .LBB17_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB17_4
; GCN3-NEXT: .LBB17_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -3114,6 +3150,7 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB26_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -3127,6 +3164,8 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB26_4
; GCN1-NEXT: .LBB26_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB26_4
; GCN1-NEXT: .LBB26_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s7
@@ -3156,6 +3195,7 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB26_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -3169,6 +3209,8 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB26_4
; GCN2-NEXT: .LBB26_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB26_4
; GCN2-NEXT: .LBB26_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -3195,6 +3237,7 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB26_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -3208,6 +3251,8 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB26_4
; GCN3-NEXT: .LBB26_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB26_4
; GCN3-NEXT: .LBB26_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -3240,6 +3285,7 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB27_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -3253,6 +3299,8 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_branch .LBB27_4
; GCN1-NEXT: .LBB27_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB27_4
; GCN1-NEXT: .LBB27_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s7
@@ -3284,6 +3332,7 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB27_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -3297,6 +3346,8 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_branch .LBB27_4
; GCN2-NEXT: .LBB27_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB27_4
; GCN2-NEXT: .LBB27_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -3325,6 +3376,7 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB27_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -3338,6 +3390,8 @@ define amdgpu_gfx i64 @flat_atomic_add_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_branch .LBB27_4
; GCN3-NEXT: .LBB27_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB27_4
; GCN3-NEXT: .LBB27_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -4409,6 +4463,7 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB36_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -4422,6 +4477,8 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB36_4
; GCN1-NEXT: .LBB36_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB36_4
; GCN1-NEXT: .LBB36_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s7
@@ -4451,6 +4508,7 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB36_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -4464,6 +4522,8 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB36_4
; GCN2-NEXT: .LBB36_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB36_4
; GCN2-NEXT: .LBB36_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -4490,6 +4550,7 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB36_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -4503,6 +4564,8 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB36_4
; GCN3-NEXT: .LBB36_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB36_4
; GCN3-NEXT: .LBB36_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -4535,6 +4598,7 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB37_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -4548,6 +4612,8 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_branch .LBB37_4
; GCN1-NEXT: .LBB37_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB37_4
; GCN1-NEXT: .LBB37_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v4, s7
@@ -4579,6 +4645,7 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB37_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -4592,6 +4659,8 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_branch .LBB37_4
; GCN2-NEXT: .LBB37_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB37_4
; GCN2-NEXT: .LBB37_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -4620,6 +4689,7 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB37_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -4633,6 +4703,8 @@ define amdgpu_gfx i64 @flat_atomic_sub_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_branch .LBB37_4
; GCN3-NEXT: .LBB37_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB37_4
; GCN3-NEXT: .LBB37_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -5698,6 +5770,7 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB46_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -5711,6 +5784,8 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB46_4
; GCN1-NEXT: .LBB46_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB46_4
; GCN1-NEXT: .LBB46_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: s_and_b64 s[34:35], s[34:35], exec
@@ -5739,6 +5814,7 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB46_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -5752,6 +5828,8 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB46_4
; GCN2-NEXT: .LBB46_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB46_4
; GCN2-NEXT: .LBB46_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -5777,6 +5855,7 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB46_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -5790,6 +5869,8 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB46_4
; GCN3-NEXT: .LBB46_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB46_4
; GCN3-NEXT: .LBB46_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -5821,6 +5902,7 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB47_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -5834,6 +5916,8 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_branch .LBB47_4
; GCN1-NEXT: .LBB47_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB47_4
; GCN1-NEXT: .LBB47_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: s_and_b64 s[36:37], s[36:37], exec
@@ -5864,6 +5948,7 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB47_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -5877,6 +5962,8 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_branch .LBB47_4
; GCN2-NEXT: .LBB47_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB47_4
; GCN2-NEXT: .LBB47_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -5904,6 +5991,7 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB47_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -5917,6 +6005,8 @@ define amdgpu_gfx i64 @flat_atomic_and_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_branch .LBB47_4
; GCN3-NEXT: .LBB47_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB47_4
; GCN3-NEXT: .LBB47_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -7359,6 +7449,7 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB56_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s34, s4, 4
@@ -7391,7 +7482,8 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB56_6
; GCN1-NEXT: .LBB56_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB56_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccz .LBB56_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: s_and_b64 s[34:35], s[34:35], exec
@@ -7422,6 +7514,7 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB56_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s34, s4, 4
@@ -7454,7 +7547,8 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB56_6
; GCN2-NEXT: .LBB56_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB56_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccz .LBB56_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -7482,6 +7576,7 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB56_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s4
@@ -7509,7 +7604,8 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB56_6
; GCN3-NEXT: .LBB56_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB56_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccz .LBB56_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -7543,6 +7639,7 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB57_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s36, s34, 4
@@ -7575,7 +7672,8 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_branch .LBB57_6
; GCN1-NEXT: .LBB57_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB57_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccz .LBB57_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: s_and_b64 s[36:37], s[36:37], exec
@@ -7608,6 +7706,7 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB57_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s36, s34, 4
@@ -7640,7 +7739,8 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_branch .LBB57_6
; GCN2-NEXT: .LBB57_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB57_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccz .LBB57_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -7670,6 +7770,7 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB57_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s34
@@ -7697,7 +7798,8 @@ define amdgpu_gfx i64 @flat_atomic_nand_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_branch .LBB57_6
; GCN3-NEXT: .LBB57_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB57_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccz .LBB57_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -8896,6 +8998,7 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_scalar(ptr inreg %ptr, i64 inreg %
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB66_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -8909,6 +9012,8 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_scalar(ptr inreg %ptr, i64 inreg %
; GCN1-NEXT: s_branch .LBB66_4
; GCN1-NEXT: .LBB66_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB66_4
; GCN1-NEXT: .LBB66_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: s_and_b64 s[34:35], s[34:35], exec
@@ -8937,6 +9042,7 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_scalar(ptr inreg %ptr, i64 inreg %
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB66_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -8950,6 +9056,8 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_scalar(ptr inreg %ptr, i64 inreg %
; GCN2-NEXT: s_branch .LBB66_4
; GCN2-NEXT: .LBB66_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB66_4
; GCN2-NEXT: .LBB66_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -8975,6 +9083,7 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_scalar(ptr inreg %ptr, i64 inreg %
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB66_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -8988,6 +9097,8 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_scalar(ptr inreg %ptr, i64 inreg %
; GCN3-NEXT: s_branch .LBB66_4
; GCN3-NEXT: .LBB66_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB66_4
; GCN3-NEXT: .LBB66_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -9019,6 +9130,7 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB67_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -9032,6 +9144,8 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_branch .LBB67_4
; GCN1-NEXT: .LBB67_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB67_4
; GCN1-NEXT: .LBB67_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: s_and_b64 s[36:37], s[36:37], exec
@@ -9062,6 +9176,7 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB67_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -9075,6 +9190,8 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_branch .LBB67_4
; GCN2-NEXT: .LBB67_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB67_4
; GCN2-NEXT: .LBB67_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -9102,6 +9219,7 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB67_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -9115,6 +9233,8 @@ define amdgpu_gfx i64 @flat_atomic_or_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_branch .LBB67_4
; GCN3-NEXT: .LBB67_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB67_4
; GCN3-NEXT: .LBB67_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -10179,6 +10299,7 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB76_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -10192,6 +10313,8 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB76_4
; GCN1-NEXT: .LBB76_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB76_4
; GCN1-NEXT: .LBB76_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: s_and_b64 s[34:35], s[34:35], exec
@@ -10220,6 +10343,7 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB76_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -10233,6 +10357,8 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB76_4
; GCN2-NEXT: .LBB76_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB76_4
; GCN2-NEXT: .LBB76_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -10258,6 +10384,7 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB76_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -10271,6 +10398,8 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB76_4
; GCN3-NEXT: .LBB76_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB76_4
; GCN3-NEXT: .LBB76_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -10302,6 +10431,7 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB77_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -10315,6 +10445,8 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_branch .LBB77_4
; GCN1-NEXT: .LBB77_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB77_4
; GCN1-NEXT: .LBB77_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: s_and_b64 s[36:37], s[36:37], exec
@@ -10345,6 +10477,7 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB77_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -10358,6 +10491,8 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_branch .LBB77_4
; GCN2-NEXT: .LBB77_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB77_4
; GCN2-NEXT: .LBB77_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -10385,6 +10520,7 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB77_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -10398,6 +10534,8 @@ define amdgpu_gfx i64 @flat_atomic_xor_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_branch .LBB77_4
; GCN3-NEXT: .LBB77_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB77_4
; GCN3-NEXT: .LBB77_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -11798,6 +11936,7 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB86_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s34, s4, 4
@@ -11831,7 +11970,8 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB86_6
; GCN1-NEXT: .LBB86_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB86_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccz .LBB86_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -11862,6 +12002,7 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB86_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s34, s4, 4
@@ -11895,7 +12036,8 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB86_6
; GCN2-NEXT: .LBB86_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB86_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccz .LBB86_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -11923,6 +12065,7 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB86_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s4
@@ -11951,7 +12094,8 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB86_6
; GCN3-NEXT: .LBB86_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB86_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccz .LBB86_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -11985,6 +12129,7 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB87_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s36, s34, 4
@@ -12018,7 +12163,8 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_branch .LBB87_6
; GCN1-NEXT: .LBB87_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB87_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccz .LBB87_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -12051,6 +12197,7 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB87_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s36, s34, 4
@@ -12084,7 +12231,8 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_branch .LBB87_6
; GCN2-NEXT: .LBB87_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB87_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccz .LBB87_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -12114,6 +12262,7 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB87_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s34
@@ -12142,7 +12291,8 @@ define amdgpu_gfx i64 @flat_atomic_max_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_branch .LBB87_6
; GCN3-NEXT: .LBB87_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB87_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccz .LBB87_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -12401,6 +12551,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB89_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v3, s1
@@ -12429,7 +12580,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB89_6
; GCN1-NEXT: .LBB89_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB89_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccz .LBB89_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -12472,6 +12624,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB89_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v3, s1
@@ -12500,7 +12653,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB89_6
; GCN2-NEXT: .LBB89_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB89_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccz .LBB89_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -12542,6 +12696,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN3-NEXT: s_cmp_eq_u32 s1, s3
; GCN3-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_mov_b64 s[2:3], -1
; GCN3-NEXT: s_cbranch_vccz .LBB89_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v3, s1
@@ -12570,7 +12725,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN3-NEXT: s_branch .LBB89_6
; GCN3-NEXT: .LBB89_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB89_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_vccz .LBB89_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN3-NEXT: s_cselect_b32 s0, s0, -1
@@ -12825,6 +12981,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB91_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v3, s1
@@ -12853,7 +13010,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB91_6
; GCN1-NEXT: .LBB91_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB91_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccz .LBB91_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -12894,6 +13052,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB91_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v3, s1
@@ -12922,7 +13081,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB91_6
; GCN2-NEXT: .LBB91_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB91_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccz .LBB91_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -12962,6 +13122,7 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN3-NEXT: s_cmp_eq_u32 s1, s3
; GCN3-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_mov_b64 s[2:3], -1
; GCN3-NEXT: s_cbranch_vccz .LBB91_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v3, s1
@@ -12990,7 +13151,8 @@ define amdgpu_kernel void @atomic_max_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN3-NEXT: s_branch .LBB91_6
; GCN3-NEXT: .LBB91_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB91_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_vccz .LBB91_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN3-NEXT: s_cselect_b32 s0, s0, -1
@@ -14505,6 +14667,7 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB100_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s34, s4, 4
@@ -14538,7 +14701,8 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB100_6
; GCN1-NEXT: .LBB100_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB100_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccz .LBB100_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -14569,6 +14733,7 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB100_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s34, s4, 4
@@ -14602,7 +14767,8 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB100_6
; GCN2-NEXT: .LBB100_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB100_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccz .LBB100_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -14630,6 +14796,7 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB100_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s4
@@ -14658,7 +14825,8 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB100_6
; GCN3-NEXT: .LBB100_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB100_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccz .LBB100_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -14692,6 +14860,7 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB101_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s36, s34, 4
@@ -14725,7 +14894,8 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_branch .LBB101_6
; GCN1-NEXT: .LBB101_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB101_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccz .LBB101_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -14758,6 +14928,7 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB101_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s36, s34, 4
@@ -14791,7 +14962,8 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_branch .LBB101_6
; GCN2-NEXT: .LBB101_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB101_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccz .LBB101_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -14821,6 +14993,7 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB101_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s34
@@ -14849,7 +15022,8 @@ define amdgpu_gfx i64 @flat_atomic_umax_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_branch .LBB101_6
; GCN3-NEXT: .LBB101_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB101_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccz .LBB101_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -15108,6 +15282,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB103_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v3, s1
@@ -15136,7 +15311,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN1-NEXT: s_branch .LBB103_6
; GCN1-NEXT: .LBB103_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB103_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccz .LBB103_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -15179,6 +15355,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB103_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v3, s1
@@ -15207,7 +15384,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN2-NEXT: s_branch .LBB103_6
; GCN2-NEXT: .LBB103_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB103_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccz .LBB103_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -15249,6 +15427,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN3-NEXT: s_cmp_eq_u32 s1, s3
; GCN3-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_mov_b64 s[2:3], -1
; GCN3-NEXT: s_cbranch_vccz .LBB103_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v3, s1
@@ -15277,7 +15456,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64_offset(ptr %out, ptr %out2
; GCN3-NEXT: s_branch .LBB103_6
; GCN3-NEXT: .LBB103_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB103_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_vccz .LBB103_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN3-NEXT: s_cselect_b32 s0, s0, -1
@@ -15323,6 +15503,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB104_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v3, s1
@@ -15351,7 +15532,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN1-NEXT: s_branch .LBB104_6
; GCN1-NEXT: .LBB104_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB104_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccz .LBB104_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -15392,6 +15574,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB104_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v3, s1
@@ -15420,7 +15603,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN2-NEXT: s_branch .LBB104_6
; GCN2-NEXT: .LBB104_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB104_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccz .LBB104_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -15460,6 +15644,7 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN3-NEXT: s_cmp_eq_u32 s1, s3
; GCN3-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_mov_b64 s[2:3], -1
; GCN3-NEXT: s_cbranch_vccz .LBB104_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v3, s1
@@ -15488,7 +15673,8 @@ define amdgpu_kernel void @atomic_umax_i64_ret_addr64(ptr %out, ptr %out2, i64 %
; GCN3-NEXT: s_branch .LBB104_6
; GCN3-NEXT: .LBB104_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB104_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_vccz .LBB104_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN3-NEXT: s_cselect_b32 s0, s0, -1
@@ -17003,6 +17189,7 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB113_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s34, s4, 4
@@ -17036,7 +17223,8 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB113_6
; GCN1-NEXT: .LBB113_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB113_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccz .LBB113_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -17067,6 +17255,7 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB113_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s34, s4, 4
@@ -17100,7 +17289,8 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB113_6
; GCN2-NEXT: .LBB113_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB113_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccz .LBB113_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -17128,6 +17318,7 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB113_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s4
@@ -17156,7 +17347,8 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB113_6
; GCN3-NEXT: .LBB113_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB113_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccz .LBB113_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -17190,6 +17382,7 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB114_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s36, s34, 4
@@ -17223,7 +17416,8 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN1-NEXT: s_branch .LBB114_6
; GCN1-NEXT: .LBB114_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB114_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccz .LBB114_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -17256,6 +17450,7 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB114_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s36, s34, 4
@@ -17289,7 +17484,8 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN2-NEXT: s_branch .LBB114_6
; GCN2-NEXT: .LBB114_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB114_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccz .LBB114_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -17319,6 +17515,7 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB114_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s34
@@ -17347,7 +17544,8 @@ define amdgpu_gfx i64 @flat_atomic_umin_i64_ret_offset_scalar(ptr inreg %out, i6
; GCN3-NEXT: s_branch .LBB114_6
; GCN3-NEXT: .LBB114_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB114_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccz .LBB114_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -18858,6 +19056,7 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB123_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s34, s4, 4
@@ -18891,7 +19090,8 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN1-NEXT: s_branch .LBB123_6
; GCN1-NEXT: .LBB123_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB123_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccz .LBB123_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -18922,6 +19122,7 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB123_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s34, s4, 4
@@ -18955,7 +19156,8 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN2-NEXT: s_branch .LBB123_6
; GCN2-NEXT: .LBB123_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB123_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccz .LBB123_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -18983,6 +19185,7 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB123_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s4
@@ -19011,7 +19214,8 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_scalar(ptr inreg %ptr, i64 inreg
; GCN3-NEXT: s_branch .LBB123_6
; GCN3-NEXT: .LBB123_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB123_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccz .LBB123_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -19045,6 +19249,7 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB124_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: s_add_u32 s36, s34, 4
@@ -19078,7 +19283,8 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN1-NEXT: s_branch .LBB124_6
; GCN1-NEXT: .LBB124_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB124_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccz .LBB124_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -19111,6 +19317,7 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB124_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: s_add_u32 s36, s34, 4
@@ -19144,7 +19351,8 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN2-NEXT: s_branch .LBB124_6
; GCN2-NEXT: .LBB124_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB124_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccz .LBB124_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -19174,6 +19382,7 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB124_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v2, s34
@@ -19202,7 +19411,8 @@ define amdgpu_gfx i64 @flat_atomic_min_i64_ret_offset_scalar(ptr inreg %out, i64
; GCN3-NEXT: s_branch .LBB124_6
; GCN3-NEXT: .LBB124_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB124_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccz .LBB124_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -19461,6 +19671,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB126_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v3, s1
@@ -19489,7 +19700,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN1-NEXT: s_branch .LBB126_6
; GCN1-NEXT: .LBB126_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB126_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccz .LBB126_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -19532,6 +19744,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB126_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v3, s1
@@ -19560,7 +19773,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN2-NEXT: s_branch .LBB126_6
; GCN2-NEXT: .LBB126_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB126_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccz .LBB126_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -19602,6 +19816,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN3-NEXT: s_cmp_eq_u32 s1, s3
; GCN3-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_mov_b64 s[2:3], -1
; GCN3-NEXT: s_cbranch_vccz .LBB126_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v3, s1
@@ -19630,7 +19845,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64_offset(ptr %out, ptr %out2,
; GCN3-NEXT: s_branch .LBB126_6
; GCN3-NEXT: .LBB126_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB126_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_vccz .LBB126_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN3-NEXT: s_cselect_b32 s0, s0, -1
@@ -19872,6 +20088,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_cmp_eq_u32 s1, s2
; GCN1-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_mov_b64 s[2:3], -1
; GCN1-NEXT: s_cbranch_vccz .LBB128_4
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v3, s1
@@ -19900,7 +20117,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN1-NEXT: s_branch .LBB128_6
; GCN1-NEXT: .LBB128_4:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN1-NEXT: s_cbranch_execz .LBB128_6
+; GCN1-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN1-NEXT: s_cbranch_vccz .LBB128_6
; GCN1-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[2:3], s[0:1], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s12
@@ -19941,6 +20159,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_cmp_eq_u32 s1, s2
; GCN2-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_mov_b64 s[2:3], -1
; GCN2-NEXT: s_cbranch_vccz .LBB128_4
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v3, s1
@@ -19969,7 +20188,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN2-NEXT: s_branch .LBB128_6
; GCN2-NEXT: .LBB128_4:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN2-NEXT: s_cbranch_execz .LBB128_6
+; GCN2-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN2-NEXT: s_cbranch_vccz .LBB128_6
; GCN2-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN2-NEXT: s_cselect_b32 s0, s0, -1
@@ -20009,6 +20229,7 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN3-NEXT: s_cmp_eq_u32 s1, s3
; GCN3-NEXT: s_cselect_b64 s[2:3], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_mov_b64 s[2:3], -1
; GCN3-NEXT: s_cbranch_vccz .LBB128_4
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v3, s1
@@ -20037,7 +20258,8 @@ define amdgpu_kernel void @atomic_min_i64_ret_addr64(ptr %out, ptr %out2, i64 %i
; GCN3-NEXT: s_branch .LBB128_6
; GCN3-NEXT: .LBB128_4:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN3-NEXT: s_cbranch_execz .LBB128_6
+; GCN3-NEXT: s_and_b64 vcc, exec, s[2:3]
+; GCN3-NEXT: s_cbranch_vccz .LBB128_6
; GCN3-NEXT: ; %bb.5: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN3-NEXT: s_cselect_b32 s0, s0, -1
@@ -21270,6 +21492,7 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB137_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -21283,6 +21506,8 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN1-NEXT: s_branch .LBB137_4
; GCN1-NEXT: .LBB137_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB137_4
; GCN1-NEXT: .LBB137_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: s_and_b64 s[34:35], s[34:35], exec
@@ -21314,6 +21539,7 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB137_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -21327,6 +21553,8 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN2-NEXT: s_branch .LBB137_4
; GCN2-NEXT: .LBB137_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB137_4
; GCN2-NEXT: .LBB137_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -21355,6 +21583,7 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB137_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -21368,6 +21597,8 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN3-NEXT: s_branch .LBB137_4
; GCN3-NEXT: .LBB137_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB137_4
; GCN3-NEXT: .LBB137_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -21402,6 +21633,7 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB138_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -21415,6 +21647,8 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN1-NEXT: s_branch .LBB138_4
; GCN1-NEXT: .LBB138_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB138_4
; GCN1-NEXT: .LBB138_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: s_and_b64 s[36:37], s[36:37], exec
@@ -21448,6 +21682,7 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB138_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -21461,6 +21696,8 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN2-NEXT: s_branch .LBB138_4
; GCN2-NEXT: .LBB138_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB138_4
; GCN2-NEXT: .LBB138_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -21491,6 +21728,7 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB138_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -21504,6 +21742,8 @@ define amdgpu_gfx i64 @flat_atomic_uinc_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN3-NEXT: s_branch .LBB138_4
; GCN3-NEXT: .LBB138_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB138_4
; GCN3-NEXT: .LBB138_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
@@ -22679,6 +22919,7 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN1-NEXT: s_cmp_eq_u32 s5, s34
; GCN1-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_mov_b64 s[34:35], -1
; GCN1-NEXT: s_cbranch_vccz .LBB147_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s4
@@ -22692,6 +22933,8 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN1-NEXT: s_branch .LBB147_4
; GCN1-NEXT: .LBB147_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN1-NEXT: s_cbranch_vccnz .LBB147_4
; GCN1-NEXT: .LBB147_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[34:35], s[4:5], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -22727,6 +22970,7 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN2-NEXT: s_cmp_eq_u32 s5, s34
; GCN2-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_mov_b64 s[34:35], -1
; GCN2-NEXT: s_cbranch_vccz .LBB147_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s4
@@ -22740,6 +22984,8 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN2-NEXT: s_branch .LBB147_4
; GCN2-NEXT: .LBB147_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN2-NEXT: s_cbranch_vccnz .LBB147_4
; GCN2-NEXT: .LBB147_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN2-NEXT: s_cselect_b32 s34, s4, -1
@@ -22772,6 +23018,7 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN3-NEXT: s_cmp_eq_u32 s5, s35
; GCN3-NEXT: s_cselect_b64 s[34:35], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_mov_b64 s[34:35], -1
; GCN3-NEXT: s_cbranch_vccz .LBB147_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s4
@@ -22785,6 +23032,8 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_scalar(ptr inreg %ptr, i64
; GCN3-NEXT: s_branch .LBB147_4
; GCN3-NEXT: .LBB147_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[34:35]
+; GCN3-NEXT: s_cbranch_vccnz .LBB147_4
; GCN3-NEXT: .LBB147_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[4:5], 0
; GCN3-NEXT: s_cselect_b32 s34, s4, -1
@@ -22823,6 +23072,7 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN1-NEXT: s_cmp_eq_u32 s35, s36
; GCN1-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_mov_b64 s[36:37], -1
; GCN1-NEXT: s_cbranch_vccz .LBB148_2
; GCN1-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN1-NEXT: v_mov_b32_e32 v0, s34
@@ -22836,6 +23086,8 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN1-NEXT: s_branch .LBB148_4
; GCN1-NEXT: .LBB148_2:
; GCN1-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN1-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN1-NEXT: s_cbranch_vccnz .LBB148_4
; GCN1-NEXT: .LBB148_3: ; %atomicrmw.private
; GCN1-NEXT: v_cmp_ne_u64_e64 s[36:37], s[34:35], 0
; GCN1-NEXT: v_mov_b32_e32 v5, s6
@@ -22873,6 +23125,7 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN2-NEXT: s_cmp_eq_u32 s35, s36
; GCN2-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_mov_b64 s[36:37], -1
; GCN2-NEXT: s_cbranch_vccz .LBB148_2
; GCN2-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN2-NEXT: v_mov_b32_e32 v0, s34
@@ -22886,6 +23139,8 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN2-NEXT: s_branch .LBB148_4
; GCN2-NEXT: .LBB148_2:
; GCN2-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN2-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN2-NEXT: s_cbranch_vccnz .LBB148_4
; GCN2-NEXT: .LBB148_3: ; %atomicrmw.private
; GCN2-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN2-NEXT: s_cselect_b32 s34, s34, -1
@@ -22920,6 +23175,7 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN3-NEXT: s_cmp_eq_u32 s35, s37
; GCN3-NEXT: s_cselect_b64 s[36:37], -1, 0
; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_mov_b64 s[36:37], -1
; GCN3-NEXT: s_cbranch_vccz .LBB148_2
; GCN3-NEXT: ; %bb.1: ; %atomicrmw.global
; GCN3-NEXT: v_mov_b32_e32 v0, s34
@@ -22933,6 +23189,8 @@ define amdgpu_gfx i64 @flat_atomic_udec_wrap_i64_ret_offset_scalar(ptr inreg %ou
; GCN3-NEXT: s_branch .LBB148_4
; GCN3-NEXT: .LBB148_2:
; GCN3-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN3-NEXT: s_andn2_b64 vcc, exec, s[36:37]
+; GCN3-NEXT: s_cbranch_vccnz .LBB148_4
; GCN3-NEXT: .LBB148_3: ; %atomicrmw.private
; GCN3-NEXT: s_cmp_lg_u64 s[34:35], 0
; GCN3-NEXT: s_cselect_b32 s34, s34, -1
diff --git a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
index 17a5f52..ba64485 100644
--- a/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
+++ b/llvm/test/CodeGen/AMDGPU/indirect-addressing-si.ll
@@ -7216,7 +7216,8 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; GENERIC-NEXT: s_load_dword s0, s[4:5], 0x9
; GENERIC-NEXT: s_waitcnt lgkmcnt(0)
; GENERIC-NEXT: s_cmp_lg_u32 s0, 0
-; GENERIC-NEXT: s_cbranch_scc0 .LBB19_4
+; GENERIC-NEXT: s_mov_b64 s[0:1], -1
+; GENERIC-NEXT: s_cbranch_scc0 .LBB19_2
; GENERIC-NEXT: ; %bb.1: ; %bb4
; GENERIC-NEXT: s_mov_b32 s3, 0xf000
; GENERIC-NEXT: s_mov_b32 s2, -1
@@ -7225,9 +7226,11 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; GENERIC-NEXT: ;;#ASMSTART
; GENERIC-NEXT: ; reg use v[0:3]
; GENERIC-NEXT: ;;#ASMEND
-; GENERIC-NEXT: s_mov_b64 vcc, exec
-; GENERIC-NEXT: s_cbranch_execnz .LBB19_3
-; GENERIC-NEXT: .LBB19_2: ; %bb1
+; GENERIC-NEXT: s_mov_b64 s[0:1], 0
+; GENERIC-NEXT: .LBB19_2: ; %Flow
+; GENERIC-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GENERIC-NEXT: s_cbranch_vccnz .LBB19_4
+; GENERIC-NEXT: ; %bb.3: ; %bb1
; GENERIC-NEXT: s_mov_b32 s3, 0xf000
; GENERIC-NEXT: s_mov_b32 s2, -1
; GENERIC-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 glc
@@ -7235,15 +7238,12 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; GENERIC-NEXT: ;;#ASMSTART
; GENERIC-NEXT: ; reg use v[0:3]
; GENERIC-NEXT: ;;#ASMEND
-; GENERIC-NEXT: .LBB19_3: ; %bb7
+; GENERIC-NEXT: .LBB19_4: ; %bb7
; GENERIC-NEXT: s_mov_b32 s3, 0xf000
; GENERIC-NEXT: s_mov_b32 s2, -1
; GENERIC-NEXT: buffer_store_dword v0, off, s[0:3], 0
; GENERIC-NEXT: s_waitcnt vmcnt(0)
; GENERIC-NEXT: s_endpgm
-; GENERIC-NEXT: .LBB19_4:
-; GENERIC-NEXT: s_mov_b64 vcc, 0
-; GENERIC-NEXT: s_branch .LBB19_2
;
; NOOPT-LABEL: extract_adjacent_blocks:
; NOOPT: ; %bb.0: ; %bb
@@ -7350,7 +7350,8 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; SI-MOVREL-NEXT: s_load_dword s0, s[4:5], 0x9
; SI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; SI-MOVREL-NEXT: s_cmp_lg_u32 s0, 0
-; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB19_4
+; SI-MOVREL-NEXT: s_mov_b64 s[0:1], -1
+; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB19_2
; SI-MOVREL-NEXT: ; %bb.1: ; %bb4
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
@@ -7359,8 +7360,11 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; SI-MOVREL-NEXT: ;;#ASMSTART
; SI-MOVREL-NEXT: ; reg use v[0:3]
; SI-MOVREL-NEXT: ;;#ASMEND
-; SI-MOVREL-NEXT: s_cbranch_execnz .LBB19_3
-; SI-MOVREL-NEXT: .LBB19_2: ; %bb1
+; SI-MOVREL-NEXT: s_mov_b64 s[0:1], 0
+; SI-MOVREL-NEXT: .LBB19_2: ; %Flow
+; SI-MOVREL-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; SI-MOVREL-NEXT: s_cbranch_vccnz .LBB19_4
+; SI-MOVREL-NEXT: ; %bb.3: ; %bb1
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
; SI-MOVREL-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 glc
@@ -7368,66 +7372,68 @@ define amdgpu_kernel void @extract_adjacent_blocks(i32 %arg) {
; SI-MOVREL-NEXT: ;;#ASMSTART
; SI-MOVREL-NEXT: ; reg use v[0:3]
; SI-MOVREL-NEXT: ;;#ASMEND
-; SI-MOVREL-NEXT: .LBB19_3: ; %bb7
+; SI-MOVREL-NEXT: .LBB19_4: ; %bb7
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
; SI-MOVREL-NEXT: buffer_store_dword v0, off, s[0:3], 0
; SI-MOVREL-NEXT: s_waitcnt vmcnt(0)
; SI-MOVREL-NEXT: s_endpgm
-; SI-MOVREL-NEXT: .LBB19_4:
-; SI-MOVREL-NEXT: s_branch .LBB19_2
;
; VI-LABEL: extract_adjacent_blocks:
; VI: ; %bb.0: ; %bb
; VI-NEXT: s_load_dword s0, s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s0, 0
-; VI-NEXT: s_cbranch_scc0 .LBB19_4
+; VI-NEXT: s_mov_b64 s[0:1], -1
+; VI-NEXT: s_cbranch_scc0 .LBB19_2
; VI-NEXT: ; %bb.1: ; %bb4
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: ;;#ASMSTART
; VI-NEXT: ; reg use v[0:3]
; VI-NEXT: ;;#ASMEND
-; VI-NEXT: s_cbranch_execnz .LBB19_3
-; VI-NEXT: .LBB19_2: ; %bb1
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: .LBB19_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; VI-NEXT: s_cbranch_vccnz .LBB19_4
+; VI-NEXT: ; %bb.3: ; %bb1
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: ;;#ASMSTART
; VI-NEXT: ; reg use v[0:3]
; VI-NEXT: ;;#ASMEND
-; VI-NEXT: .LBB19_3: ; %bb7
+; VI-NEXT: .LBB19_4: ; %bb7
; VI-NEXT: flat_store_dword v[0:1], v0
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB19_4:
-; VI-NEXT: s_branch .LBB19_2
;
; GFX9-IDXMODE-LABEL: extract_adjacent_blocks:
; GFX9-IDXMODE: ; %bb.0: ; %bb
; GFX9-IDXMODE-NEXT: s_load_dword s0, s[4:5], 0x24
; GFX9-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-IDXMODE-NEXT: s_cmp_lg_u32 s0, 0
-; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB19_4
+; GFX9-IDXMODE-NEXT: s_mov_b64 s[0:1], -1
+; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB19_2
; GFX9-IDXMODE-NEXT: ; %bb.1: ; %bb4
; GFX9-IDXMODE-NEXT: global_load_dwordx4 v[0:3], v[0:1], off glc
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: ;;#ASMSTART
; GFX9-IDXMODE-NEXT: ; reg use v[0:3]
; GFX9-IDXMODE-NEXT: ;;#ASMEND
-; GFX9-IDXMODE-NEXT: s_cbranch_execnz .LBB19_3
-; GFX9-IDXMODE-NEXT: .LBB19_2: ; %bb1
+; GFX9-IDXMODE-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-IDXMODE-NEXT: .LBB19_2: ; %Flow
+; GFX9-IDXMODE-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GFX9-IDXMODE-NEXT: s_cbranch_vccnz .LBB19_4
+; GFX9-IDXMODE-NEXT: ; %bb.3: ; %bb1
; GFX9-IDXMODE-NEXT: global_load_dwordx4 v[0:3], v[0:1], off glc
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: ;;#ASMSTART
; GFX9-IDXMODE-NEXT: ; reg use v[0:3]
; GFX9-IDXMODE-NEXT: ;;#ASMEND
-; GFX9-IDXMODE-NEXT: .LBB19_3: ; %bb7
+; GFX9-IDXMODE-NEXT: .LBB19_4: ; %bb7
; GFX9-IDXMODE-NEXT: global_store_dword v[0:1], v0, off
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: s_endpgm
-; GFX9-IDXMODE-NEXT: .LBB19_4:
-; GFX9-IDXMODE-NEXT: s_branch .LBB19_2
bb:
%tmp = icmp eq i32 %arg, 0
br i1 %tmp, label %bb1, label %bb4
@@ -7456,7 +7462,8 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; GENERIC-NEXT: s_load_dword s0, s[4:5], 0x9
; GENERIC-NEXT: s_waitcnt lgkmcnt(0)
; GENERIC-NEXT: s_cmp_lg_u32 s0, 0
-; GENERIC-NEXT: s_cbranch_scc0 .LBB20_4
+; GENERIC-NEXT: s_mov_b64 s[0:1], -1
+; GENERIC-NEXT: s_cbranch_scc0 .LBB20_2
; GENERIC-NEXT: ; %bb.1: ; %bb4
; GENERIC-NEXT: s_mov_b32 s3, 0xf000
; GENERIC-NEXT: s_mov_b32 s2, -1
@@ -7465,9 +7472,11 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; GENERIC-NEXT: ;;#ASMSTART
; GENERIC-NEXT: ; reg use v[0:3]
; GENERIC-NEXT: ;;#ASMEND
-; GENERIC-NEXT: s_mov_b64 vcc, exec
-; GENERIC-NEXT: s_cbranch_execnz .LBB20_3
-; GENERIC-NEXT: .LBB20_2: ; %bb1
+; GENERIC-NEXT: s_mov_b64 s[0:1], 0
+; GENERIC-NEXT: .LBB20_2: ; %Flow
+; GENERIC-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GENERIC-NEXT: s_cbranch_vccnz .LBB20_4
+; GENERIC-NEXT: ; %bb.3: ; %bb1
; GENERIC-NEXT: s_mov_b32 s3, 0xf000
; GENERIC-NEXT: s_mov_b32 s2, -1
; GENERIC-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 glc
@@ -7475,15 +7484,12 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; GENERIC-NEXT: ;;#ASMSTART
; GENERIC-NEXT: ; reg use v[0:3]
; GENERIC-NEXT: ;;#ASMEND
-; GENERIC-NEXT: .LBB20_3: ; %bb7
+; GENERIC-NEXT: .LBB20_4: ; %bb7
; GENERIC-NEXT: s_mov_b32 s3, 0xf000
; GENERIC-NEXT: s_mov_b32 s2, -1
; GENERIC-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; GENERIC-NEXT: s_waitcnt vmcnt(0)
; GENERIC-NEXT: s_endpgm
-; GENERIC-NEXT: .LBB20_4:
-; GENERIC-NEXT: s_mov_b64 vcc, 0
-; GENERIC-NEXT: s_branch .LBB20_2
;
; NOOPT-LABEL: insert_adjacent_blocks:
; NOOPT: ; %bb.0: ; %bb
@@ -7596,7 +7602,8 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; SI-MOVREL-NEXT: s_load_dword s0, s[4:5], 0x9
; SI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; SI-MOVREL-NEXT: s_cmp_lg_u32 s0, 0
-; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB20_4
+; SI-MOVREL-NEXT: s_mov_b64 s[0:1], -1
+; SI-MOVREL-NEXT: s_cbranch_scc0 .LBB20_2
; SI-MOVREL-NEXT: ; %bb.1: ; %bb4
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
@@ -7605,8 +7612,11 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; SI-MOVREL-NEXT: ;;#ASMSTART
; SI-MOVREL-NEXT: ; reg use v[0:3]
; SI-MOVREL-NEXT: ;;#ASMEND
-; SI-MOVREL-NEXT: s_cbranch_execnz .LBB20_3
-; SI-MOVREL-NEXT: .LBB20_2: ; %bb1
+; SI-MOVREL-NEXT: s_mov_b64 s[0:1], 0
+; SI-MOVREL-NEXT: .LBB20_2: ; %Flow
+; SI-MOVREL-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; SI-MOVREL-NEXT: s_cbranch_vccnz .LBB20_4
+; SI-MOVREL-NEXT: ; %bb.3: ; %bb1
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
; SI-MOVREL-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 glc
@@ -7614,66 +7624,68 @@ define amdgpu_kernel void @insert_adjacent_blocks(i32 %arg, float %val0) {
; SI-MOVREL-NEXT: ;;#ASMSTART
; SI-MOVREL-NEXT: ; reg use v[0:3]
; SI-MOVREL-NEXT: ;;#ASMEND
-; SI-MOVREL-NEXT: .LBB20_3: ; %bb7
+; SI-MOVREL-NEXT: .LBB20_4: ; %bb7
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
; SI-MOVREL-NEXT: buffer_store_dwordx4 v[0:3], off, s[0:3], 0
; SI-MOVREL-NEXT: s_waitcnt vmcnt(0)
; SI-MOVREL-NEXT: s_endpgm
-; SI-MOVREL-NEXT: .LBB20_4:
-; SI-MOVREL-NEXT: s_branch .LBB20_2
;
; VI-LABEL: insert_adjacent_blocks:
; VI: ; %bb.0: ; %bb
; VI-NEXT: s_load_dword s0, s[4:5], 0x24
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s0, 0
-; VI-NEXT: s_cbranch_scc0 .LBB20_4
+; VI-NEXT: s_mov_b64 s[0:1], -1
+; VI-NEXT: s_cbranch_scc0 .LBB20_2
; VI-NEXT: ; %bb.1: ; %bb4
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: ;;#ASMSTART
; VI-NEXT: ; reg use v[0:3]
; VI-NEXT: ;;#ASMEND
-; VI-NEXT: s_cbranch_execnz .LBB20_3
-; VI-NEXT: .LBB20_2: ; %bb1
+; VI-NEXT: s_mov_b64 s[0:1], 0
+; VI-NEXT: .LBB20_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; VI-NEXT: s_cbranch_vccnz .LBB20_4
+; VI-NEXT: ; %bb.3: ; %bb1
; VI-NEXT: flat_load_dwordx4 v[0:3], v[0:1] glc
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: ;;#ASMSTART
; VI-NEXT: ; reg use v[0:3]
; VI-NEXT: ;;#ASMEND
-; VI-NEXT: .LBB20_3: ; %bb7
+; VI-NEXT: .LBB20_4: ; %bb7
; VI-NEXT: flat_store_dwordx4 v[0:1], v[0:3]
; VI-NEXT: s_waitcnt vmcnt(0)
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB20_4:
-; VI-NEXT: s_branch .LBB20_2
;
; GFX9-IDXMODE-LABEL: insert_adjacent_blocks:
; GFX9-IDXMODE: ; %bb.0: ; %bb
; GFX9-IDXMODE-NEXT: s_load_dword s0, s[4:5], 0x24
; GFX9-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-IDXMODE-NEXT: s_cmp_lg_u32 s0, 0
-; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB20_4
+; GFX9-IDXMODE-NEXT: s_mov_b64 s[0:1], -1
+; GFX9-IDXMODE-NEXT: s_cbranch_scc0 .LBB20_2
; GFX9-IDXMODE-NEXT: ; %bb.1: ; %bb4
; GFX9-IDXMODE-NEXT: global_load_dwordx4 v[0:3], v[0:1], off glc
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: ;;#ASMSTART
; GFX9-IDXMODE-NEXT: ; reg use v[0:3]
; GFX9-IDXMODE-NEXT: ;;#ASMEND
-; GFX9-IDXMODE-NEXT: s_cbranch_execnz .LBB20_3
-; GFX9-IDXMODE-NEXT: .LBB20_2: ; %bb1
+; GFX9-IDXMODE-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-IDXMODE-NEXT: .LBB20_2: ; %Flow
+; GFX9-IDXMODE-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GFX9-IDXMODE-NEXT: s_cbranch_vccnz .LBB20_4
+; GFX9-IDXMODE-NEXT: ; %bb.3: ; %bb1
; GFX9-IDXMODE-NEXT: global_load_dwordx4 v[0:3], v[0:1], off glc
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: ;;#ASMSTART
; GFX9-IDXMODE-NEXT: ; reg use v[0:3]
; GFX9-IDXMODE-NEXT: ;;#ASMEND
-; GFX9-IDXMODE-NEXT: .LBB20_3: ; %bb7
+; GFX9-IDXMODE-NEXT: .LBB20_4: ; %bb7
; GFX9-IDXMODE-NEXT: global_store_dwordx4 v[0:1], v[0:3], off
; GFX9-IDXMODE-NEXT: s_waitcnt vmcnt(0)
; GFX9-IDXMODE-NEXT: s_endpgm
-; GFX9-IDXMODE-NEXT: .LBB20_4:
-; GFX9-IDXMODE-NEXT: s_branch .LBB20_2
bb:
%tmp = icmp eq i32 %arg, 0
br i1 %tmp, label %bb1, label %bb4
@@ -9282,14 +9294,16 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; SI-MOVREL-NEXT: s_mov_b32 s3, 0xf000
; SI-MOVREL-NEXT: s_mov_b32 s2, -1
; SI-MOVREL-NEXT: s_branch .LBB26_2
-; SI-MOVREL-NEXT: .LBB26_1:
+; SI-MOVREL-NEXT: .LBB26_1: ; in Loop: Header=BB26_2 Depth=1
; SI-MOVREL-NEXT: ; implicit-def: $vgpr0
-; SI-MOVREL-NEXT: s_branch .LBB26_6
+; SI-MOVREL-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; SI-MOVREL-NEXT: s_cbranch_vccz .LBB26_6
; SI-MOVREL-NEXT: .LBB26_2: ; %bb2
; SI-MOVREL-NEXT: ; =>This Loop Header: Depth=1
; SI-MOVREL-NEXT: ; Child Loop BB26_4 Depth 2
; SI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; SI-MOVREL-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
+; SI-MOVREL-NEXT: s_mov_b64 s[4:5], -1
; SI-MOVREL-NEXT: s_cbranch_vccnz .LBB26_1
; SI-MOVREL-NEXT: ; %bb.3: ; %bb4
; SI-MOVREL-NEXT: ; in Loop: Header=BB26_2 Depth=1
@@ -9318,14 +9332,16 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; VI-MOVREL-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-MOVREL-NEXT: v_mov_b32_e32 v0, 8
; VI-MOVREL-NEXT: s_branch .LBB26_2
-; VI-MOVREL-NEXT: .LBB26_1:
+; VI-MOVREL-NEXT: .LBB26_1: ; in Loop: Header=BB26_2 Depth=1
; VI-MOVREL-NEXT: ; implicit-def: $vgpr0
-; VI-MOVREL-NEXT: s_branch .LBB26_6
+; VI-MOVREL-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; VI-MOVREL-NEXT: s_cbranch_vccz .LBB26_6
; VI-MOVREL-NEXT: .LBB26_2: ; %bb2
; VI-MOVREL-NEXT: ; =>This Loop Header: Depth=1
; VI-MOVREL-NEXT: ; Child Loop BB26_4 Depth 2
; VI-MOVREL-NEXT: s_waitcnt lgkmcnt(0)
; VI-MOVREL-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
+; VI-MOVREL-NEXT: s_mov_b64 s[2:3], -1
; VI-MOVREL-NEXT: s_cbranch_vccnz .LBB26_1
; VI-MOVREL-NEXT: ; %bb.3: ; %bb4
; VI-MOVREL-NEXT: ; in Loop: Header=BB26_2 Depth=1
@@ -9354,14 +9370,16 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; VI-IDXMODE-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; VI-IDXMODE-NEXT: v_mov_b32_e32 v0, 8
; VI-IDXMODE-NEXT: s_branch .LBB26_2
-; VI-IDXMODE-NEXT: .LBB26_1:
+; VI-IDXMODE-NEXT: .LBB26_1: ; in Loop: Header=BB26_2 Depth=1
; VI-IDXMODE-NEXT: ; implicit-def: $vgpr0
-; VI-IDXMODE-NEXT: s_branch .LBB26_6
+; VI-IDXMODE-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; VI-IDXMODE-NEXT: s_cbranch_vccz .LBB26_6
; VI-IDXMODE-NEXT: .LBB26_2: ; %bb2
; VI-IDXMODE-NEXT: ; =>This Loop Header: Depth=1
; VI-IDXMODE-NEXT: ; Child Loop BB26_4 Depth 2
; VI-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; VI-IDXMODE-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
+; VI-IDXMODE-NEXT: s_mov_b64 s[2:3], -1
; VI-IDXMODE-NEXT: s_cbranch_vccnz .LBB26_1
; VI-IDXMODE-NEXT: ; %bb.3: ; %bb4
; VI-IDXMODE-NEXT: ; in Loop: Header=BB26_2 Depth=1
@@ -9391,14 +9409,16 @@ define amdgpu_kernel void @broken_phi_bb(i32 %arg, i32 %arg1) {
; GFX9-IDXMODE-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x24
; GFX9-IDXMODE-NEXT: v_mov_b32_e32 v0, 8
; GFX9-IDXMODE-NEXT: s_branch .LBB26_2
-; GFX9-IDXMODE-NEXT: .LBB26_1:
+; GFX9-IDXMODE-NEXT: .LBB26_1: ; in Loop: Header=BB26_2 Depth=1
; GFX9-IDXMODE-NEXT: ; implicit-def: $vgpr0
-; GFX9-IDXMODE-NEXT: s_branch .LBB26_6
+; GFX9-IDXMODE-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GFX9-IDXMODE-NEXT: s_cbranch_vccz .LBB26_6
; GFX9-IDXMODE-NEXT: .LBB26_2: ; %bb2
; GFX9-IDXMODE-NEXT: ; =>This Loop Header: Depth=1
; GFX9-IDXMODE-NEXT: ; Child Loop BB26_4 Depth 2
; GFX9-IDXMODE-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-IDXMODE-NEXT: v_cmp_le_i32_e32 vcc, s0, v0
+; GFX9-IDXMODE-NEXT: s_mov_b64 s[2:3], -1
; GFX9-IDXMODE-NEXT: s_cbranch_vccnz .LBB26_1
; GFX9-IDXMODE-NEXT: ; %bb.3: ; %bb4
; GFX9-IDXMODE-NEXT: ; in Loop: Header=BB26_2 Depth=1
diff --git a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll
index eb5c5ef..9349de3 100644
--- a/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert-delay-alu-bug.ll
@@ -73,6 +73,7 @@ define amdgpu_kernel void @f2(i32 %arg, i32 %arg1, i32 %arg2, i1 %arg3, i32 %arg
; GFX11-NEXT: ; %bb.1: ; %bb14
; GFX11-NEXT: s_load_b128 s[20:23], s[16:17], 0x2c
; GFX11-NEXT: s_mov_b32 s18, 0
+; GFX11-NEXT: s_mov_b32 s2, 0
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_bitcmp1_b32 s21, 0
; GFX11-NEXT: s_cselect_b32 s24, -1, 0
@@ -91,14 +92,12 @@ define amdgpu_kernel void @f2(i32 %arg, i32 %arg1, i32 %arg2, i1 %arg3, i32 %arg
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_swappc_b64 s[30:31], s[0:1]
; GFX11-NEXT: s_mov_b32 s14, s21
+; GFX11-NEXT: s_mov_b32 s0, 0
; GFX11-NEXT: s_mov_b32 s2, -1
-; GFX11-NEXT: s_cbranch_execz .LBB2_4
-; GFX11-NEXT: s_branch .LBB2_12
-; GFX11-NEXT: .LBB2_3:
-; GFX11-NEXT: s_mov_b32 s2, 0
+; GFX11-NEXT: .LBB2_3: ; %Flow10
; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s0
; GFX11-NEXT: s_cbranch_vccnz .LBB2_12
-; GFX11-NEXT: .LBB2_4: ; %bb16
+; GFX11-NEXT: ; %bb.4: ; %bb16
; GFX11-NEXT: s_load_b32 s0, s[16:17], 0x54
; GFX11-NEXT: s_bitcmp1_b32 s23, 0
; GFX11-NEXT: s_cselect_b32 s9, -1, 0
diff --git a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
index 44bd409..ce1033d 100644
--- a/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
+++ b/llvm/test/CodeGen/AMDGPU/insert_vector_elt.ll
@@ -1968,19 +1968,21 @@ define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s4, s[8:9], 0x4
; SI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s4, 0
-; SI-NEXT: s_cbranch_scc0 .LBB42_4
+; SI-NEXT: s_cbranch_scc0 .LBB42_2
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: s_load_dword s5, s[2:3], 0x1
; SI-NEXT: s_mov_b64 s[6:7], 0
+; SI-NEXT: .LBB42_2: ; %Flow
; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_mov_b64 vcc, vcc
-; SI-NEXT: s_cbranch_vccnz .LBB42_3
-; SI-NEXT: .LBB42_2: ; %if
+; SI-NEXT: s_cbranch_vccnz .LBB42_4
+; SI-NEXT: ; %bb.3: ; %if
; SI-NEXT: s_load_dword s5, s[2:3], 0x0
-; SI-NEXT: .LBB42_3: ; %endif
+; SI-NEXT: .LBB42_4: ; %endif
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: v_mov_b32_e32 v0, s4
; SI-NEXT: s_mov_b32 s3, 0x100f000
@@ -1988,23 +1990,25 @@ define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: v_mov_b32_e32 v1, s5
; SI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; SI-NEXT: s_endpgm
-; SI-NEXT: .LBB42_4:
-; SI-NEXT: s_branch .LBB42_2
;
; VI-LABEL: insert_split_bb:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dword s4, s[8:9], 0x10
; VI-NEXT: s_load_dwordx4 s[0:3], s[8:9], 0x0
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s4, 0
-; VI-NEXT: s_cbranch_scc0 .LBB42_4
+; VI-NEXT: s_cbranch_scc0 .LBB42_2
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: s_load_dword s5, s[2:3], 0x4
-; VI-NEXT: s_cbranch_execnz .LBB42_3
-; VI-NEXT: .LBB42_2: ; %if
+; VI-NEXT: s_mov_b64 s[6:7], 0
+; VI-NEXT: .LBB42_2: ; %Flow
+; VI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; VI-NEXT: s_cbranch_vccnz .LBB42_4
+; VI-NEXT: ; %bb.3: ; %if
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_load_dword s5, s[2:3], 0x0
-; VI-NEXT: .LBB42_3: ; %endif
+; VI-NEXT: .LBB42_4: ; %endif
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: v_mov_b32_e32 v0, s4
; VI-NEXT: s_mov_b32 s3, 0x1100f000
@@ -2012,8 +2016,6 @@ define amdgpu_kernel void @insert_split_bb(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: v_mov_b32_e32 v1, s5
; VI-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; VI-NEXT: s_endpgm
-; VI-NEXT: .LBB42_4:
-; VI-NEXT: s_branch .LBB42_2
entry:
%0 = insertelement <2 x i32> poison, i32 %a, i32 0
%1 = icmp eq i32 %a, 0
diff --git a/llvm/test/CodeGen/AMDGPU/mad-combine.ll b/llvm/test/CodeGen/AMDGPU/mad-combine.ll
index 41eeeaf..6e26ade 100644
--- a/llvm/test/CodeGen/AMDGPU/mad-combine.ll
+++ b/llvm/test/CodeGen/AMDGPU/mad-combine.ll
@@ -1058,7 +1058,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(ptr addrspace(1)
; SI-STD-NEXT: s_waitcnt vmcnt(0)
; SI-STD-NEXT: buffer_load_dword v3, v[0:1], s[0:3], 0 addr64 offset:4 glc
; SI-STD-NEXT: s_waitcnt vmcnt(0)
-; SI-STD-NEXT: buffer_load_dword v5, v[0:1], s[0:3], 0 addr64 offset:8 glc
+; SI-STD-NEXT: buffer_load_dword v4, v[0:1], s[0:3], 0 addr64 offset:8 glc
; SI-STD-NEXT: s_waitcnt vmcnt(0)
; SI-STD-NEXT: buffer_load_dword v6, v[0:1], s[0:3], 0 addr64 offset:12 glc
; SI-STD-NEXT: s_waitcnt vmcnt(0)
@@ -1067,16 +1067,16 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(ptr addrspace(1)
; SI-STD-NEXT: s_bitcmp1_b32 s6, 0
; SI-STD-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-STD-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-STD-NEXT: s_mov_b64 s[2:3], -1
; SI-STD-NEXT: s_cbranch_vccnz .LBB12_2
; SI-STD-NEXT: ; %bb.1: ; %normal
-; SI-STD-NEXT: v_mul_f32_e32 v4, v6, v1
-; SI-STD-NEXT: v_fma_f32 v4, v2, v3, v4
-; SI-STD-NEXT: v_sub_f32_e32 v4, v4, v5
+; SI-STD-NEXT: v_mul_f32_e32 v5, v6, v1
+; SI-STD-NEXT: v_fma_f32 v5, v2, v3, v5
+; SI-STD-NEXT: v_sub_f32_e32 v5, v5, v4
; SI-STD-NEXT: s_mov_b64 s[2:3], 0
; SI-STD-NEXT: s_branch .LBB12_3
; SI-STD-NEXT: .LBB12_2:
-; SI-STD-NEXT: s_mov_b64 s[2:3], -1
-; SI-STD-NEXT: ; implicit-def: $vgpr4
+; SI-STD-NEXT: ; implicit-def: $vgpr5
; SI-STD-NEXT: .LBB12_3: ; %Flow
; SI-STD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-STD-NEXT: s_andn2_b64 vcc, exec, s[2:3]
@@ -1084,13 +1084,13 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(ptr addrspace(1)
; SI-STD-NEXT: s_mov_b64 vcc, vcc
; SI-STD-NEXT: s_cbranch_vccnz .LBB12_5
; SI-STD-NEXT: ; %bb.4: ; %aggressive
-; SI-STD-NEXT: v_mad_f32 v4, v6, v1, -v5
-; SI-STD-NEXT: v_mac_f32_e32 v4, v2, v3
+; SI-STD-NEXT: v_mad_f32 v5, v6, v1, -v4
+; SI-STD-NEXT: v_mac_f32_e32 v5, v2, v3
; SI-STD-NEXT: .LBB12_5: ; %exit
; SI-STD-NEXT: s_mov_b32 s3, 0xf000
; SI-STD-NEXT: s_mov_b32 s2, 0
; SI-STD-NEXT: v_mov_b32_e32 v1, 0
-; SI-STD-NEXT: buffer_store_dword v4, v[0:1], s[0:3], 0 addr64
+; SI-STD-NEXT: buffer_store_dword v5, v[0:1], s[0:3], 0 addr64
; SI-STD-NEXT: s_endpgm
;
; SI-DENORM-FASTFMAF-LABEL: aggressive_combine_to_mad_fsub_0_f32:
@@ -1115,6 +1115,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(ptr addrspace(1)
; SI-DENORM-FASTFMAF-NEXT: s_bitcmp1_b32 s6, 0
; SI-DENORM-FASTFMAF-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-DENORM-FASTFMAF-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-FASTFMAF-NEXT: s_cbranch_vccnz .LBB12_2
; SI-DENORM-FASTFMAF-NEXT: ; %bb.1: ; %normal
; SI-DENORM-FASTFMAF-NEXT: v_mul_f32_e32 v6, v5, v1
@@ -1123,7 +1124,6 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(ptr addrspace(1)
; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], 0
; SI-DENORM-FASTFMAF-NEXT: s_branch .LBB12_3
; SI-DENORM-FASTFMAF-NEXT: .LBB12_2:
-; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-FASTFMAF-NEXT: ; implicit-def: $vgpr6
; SI-DENORM-FASTFMAF-NEXT: .LBB12_3: ; %Flow
; SI-DENORM-FASTFMAF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
@@ -1162,6 +1162,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(ptr addrspace(1)
; SI-DENORM-SLOWFMAF-NEXT: s_waitcnt vmcnt(0)
; SI-DENORM-SLOWFMAF-NEXT: s_bitcmp1_b32 s6, 0
; SI-DENORM-SLOWFMAF-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-SLOWFMAF-NEXT: s_and_b64 vcc, exec, s[0:1]
; SI-DENORM-SLOWFMAF-NEXT: v_mul_f32_e32 v1, v5, v1
; SI-DENORM-SLOWFMAF-NEXT: v_fma_f32 v1, v3, v4, v1
@@ -1171,7 +1172,6 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_0_f32(ptr addrspace(1)
; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], 0
; SI-DENORM-SLOWFMAF-NEXT: s_branch .LBB12_3
; SI-DENORM-SLOWFMAF-NEXT: .LBB12_2:
-; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-SLOWFMAF-NEXT: ; implicit-def: $vgpr3
; SI-DENORM-SLOWFMAF-NEXT: .LBB12_3: ; %Flow
; SI-DENORM-SLOWFMAF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
@@ -1288,23 +1288,23 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(ptr addrspace(1)
; SI-STD-NEXT: s_waitcnt vmcnt(0)
; SI-STD-NEXT: buffer_load_dword v4, v[0:1], s[0:3], 0 addr64 offset:8 glc
; SI-STD-NEXT: s_waitcnt vmcnt(0)
-; SI-STD-NEXT: buffer_load_dword v6, v[0:1], s[0:3], 0 addr64 offset:12 glc
+; SI-STD-NEXT: buffer_load_dword v5, v[0:1], s[0:3], 0 addr64 offset:12 glc
; SI-STD-NEXT: s_waitcnt vmcnt(0)
; SI-STD-NEXT: buffer_load_dword v1, v[0:1], s[0:3], 0 addr64 offset:16 glc
; SI-STD-NEXT: s_waitcnt vmcnt(0)
; SI-STD-NEXT: s_bitcmp1_b32 s6, 0
; SI-STD-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-STD-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-STD-NEXT: s_mov_b64 s[2:3], -1
; SI-STD-NEXT: s_cbranch_vccnz .LBB14_2
; SI-STD-NEXT: ; %bb.1: ; %normal
-; SI-STD-NEXT: v_mul_f32_e32 v5, v6, v1
-; SI-STD-NEXT: v_mac_f32_e32 v5, v2, v3
-; SI-STD-NEXT: v_sub_f32_e32 v5, v5, v4
+; SI-STD-NEXT: v_mul_f32_e32 v6, v5, v1
+; SI-STD-NEXT: v_mac_f32_e32 v6, v2, v3
+; SI-STD-NEXT: v_sub_f32_e32 v6, v6, v4
; SI-STD-NEXT: s_mov_b64 s[2:3], 0
; SI-STD-NEXT: s_branch .LBB14_3
; SI-STD-NEXT: .LBB14_2:
-; SI-STD-NEXT: s_mov_b64 s[2:3], -1
-; SI-STD-NEXT: ; implicit-def: $vgpr5
+; SI-STD-NEXT: ; implicit-def: $vgpr6
; SI-STD-NEXT: .LBB14_3: ; %Flow
; SI-STD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
; SI-STD-NEXT: s_andn2_b64 vcc, exec, s[2:3]
@@ -1312,13 +1312,13 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(ptr addrspace(1)
; SI-STD-NEXT: s_mov_b64 vcc, vcc
; SI-STD-NEXT: s_cbranch_vccnz .LBB14_5
; SI-STD-NEXT: ; %bb.4: ; %aggressive
-; SI-STD-NEXT: v_mad_f32 v5, v6, v1, -v4
-; SI-STD-NEXT: v_mac_f32_e32 v5, v2, v3
+; SI-STD-NEXT: v_mad_f32 v6, v5, v1, -v4
+; SI-STD-NEXT: v_mac_f32_e32 v6, v2, v3
; SI-STD-NEXT: .LBB14_5: ; %exit
; SI-STD-NEXT: s_mov_b32 s3, 0xf000
; SI-STD-NEXT: s_mov_b32 s2, 0
; SI-STD-NEXT: v_mov_b32_e32 v1, 0
-; SI-STD-NEXT: buffer_store_dword v5, v[0:1], s[0:3], 0 addr64
+; SI-STD-NEXT: buffer_store_dword v6, v[0:1], s[0:3], 0 addr64
; SI-STD-NEXT: s_endpgm
;
; SI-DENORM-FASTFMAF-LABEL: aggressive_combine_to_mad_fsub_2_f32:
@@ -1343,6 +1343,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(ptr addrspace(1)
; SI-DENORM-FASTFMAF-NEXT: s_bitcmp1_b32 s6, 0
; SI-DENORM-FASTFMAF-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-DENORM-FASTFMAF-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-FASTFMAF-NEXT: s_cbranch_vccnz .LBB14_2
; SI-DENORM-FASTFMAF-NEXT: ; %bb.1: ; %normal
; SI-DENORM-FASTFMAF-NEXT: v_mul_f32_e32 v6, v5, v1
@@ -1351,7 +1352,6 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(ptr addrspace(1)
; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], 0
; SI-DENORM-FASTFMAF-NEXT: s_branch .LBB14_3
; SI-DENORM-FASTFMAF-NEXT: .LBB14_2:
-; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-FASTFMAF-NEXT: ; implicit-def: $vgpr6
; SI-DENORM-FASTFMAF-NEXT: .LBB14_3: ; %Flow
; SI-DENORM-FASTFMAF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
@@ -1390,6 +1390,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(ptr addrspace(1)
; SI-DENORM-SLOWFMAF-NEXT: s_waitcnt vmcnt(0)
; SI-DENORM-SLOWFMAF-NEXT: s_bitcmp1_b32 s6, 0
; SI-DENORM-SLOWFMAF-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-SLOWFMAF-NEXT: s_and_b64 vcc, exec, s[0:1]
; SI-DENORM-SLOWFMAF-NEXT: v_mul_f32_e32 v3, v3, v4
; SI-DENORM-SLOWFMAF-NEXT: v_mul_f32_e32 v1, v5, v1
@@ -1400,7 +1401,6 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_2_f32(ptr addrspace(1)
; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], 0
; SI-DENORM-SLOWFMAF-NEXT: s_branch .LBB14_3
; SI-DENORM-SLOWFMAF-NEXT: .LBB14_2:
-; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-SLOWFMAF-NEXT: ; implicit-def: $vgpr3
; SI-DENORM-SLOWFMAF-NEXT: .LBB14_3: ; %Flow
; SI-DENORM-SLOWFMAF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
@@ -1475,6 +1475,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_3_f32(ptr addrspace(1)
; SI-STD-NEXT: s_bitcmp1_b32 s6, 0
; SI-STD-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-STD-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-STD-NEXT: s_mov_b64 s[2:3], -1
; SI-STD-NEXT: s_cbranch_vccnz .LBB15_2
; SI-STD-NEXT: ; %bb.1: ; %normal
; SI-STD-NEXT: v_mul_f32_e32 v6, v5, v1
@@ -1483,7 +1484,6 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_3_f32(ptr addrspace(1)
; SI-STD-NEXT: s_mov_b64 s[2:3], 0
; SI-STD-NEXT: s_branch .LBB15_3
; SI-STD-NEXT: .LBB15_2:
-; SI-STD-NEXT: s_mov_b64 s[2:3], -1
; SI-STD-NEXT: ; implicit-def: $vgpr6
; SI-STD-NEXT: .LBB15_3: ; %Flow
; SI-STD-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
@@ -1523,6 +1523,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_3_f32(ptr addrspace(1)
; SI-DENORM-FASTFMAF-NEXT: s_bitcmp1_b32 s6, 0
; SI-DENORM-FASTFMAF-NEXT: s_cselect_b64 s[0:1], -1, 0
; SI-DENORM-FASTFMAF-NEXT: s_and_b64 vcc, exec, s[0:1]
+; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-FASTFMAF-NEXT: s_cbranch_vccnz .LBB15_2
; SI-DENORM-FASTFMAF-NEXT: ; %bb.1: ; %normal
; SI-DENORM-FASTFMAF-NEXT: v_mul_f32_e32 v6, v5, v1
@@ -1531,7 +1532,6 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_3_f32(ptr addrspace(1)
; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], 0
; SI-DENORM-FASTFMAF-NEXT: s_branch .LBB15_3
; SI-DENORM-FASTFMAF-NEXT: .LBB15_2:
-; SI-DENORM-FASTFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-FASTFMAF-NEXT: ; implicit-def: $vgpr6
; SI-DENORM-FASTFMAF-NEXT: .LBB15_3: ; %Flow
; SI-DENORM-FASTFMAF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
@@ -1570,6 +1570,7 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_3_f32(ptr addrspace(1)
; SI-DENORM-SLOWFMAF-NEXT: s_waitcnt vmcnt(0)
; SI-DENORM-SLOWFMAF-NEXT: s_bitcmp1_b32 s6, 0
; SI-DENORM-SLOWFMAF-NEXT: s_cselect_b64 s[0:1], -1, 0
+; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-SLOWFMAF-NEXT: s_and_b64 vcc, exec, s[0:1]
; SI-DENORM-SLOWFMAF-NEXT: v_mul_f32_e32 v3, v3, v4
; SI-DENORM-SLOWFMAF-NEXT: v_mul_f32_e32 v1, v5, v1
@@ -1580,7 +1581,6 @@ define amdgpu_kernel void @aggressive_combine_to_mad_fsub_3_f32(ptr addrspace(1)
; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], 0
; SI-DENORM-SLOWFMAF-NEXT: s_branch .LBB15_3
; SI-DENORM-SLOWFMAF-NEXT: .LBB15_2:
-; SI-DENORM-SLOWFMAF-NEXT: s_mov_b64 s[2:3], -1
; SI-DENORM-SLOWFMAF-NEXT: ; implicit-def: $vgpr3
; SI-DENORM-SLOWFMAF-NEXT: .LBB15_3: ; %Flow
; SI-DENORM-SLOWFMAF-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x9
diff --git a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
index c92c672..13528ff 100644
--- a/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
+++ b/llvm/test/CodeGen/AMDGPU/memcpy-crash-issue63986.ll
@@ -27,10 +27,13 @@ define void @issue63986(i64 %0, i64 %idxprom, ptr inreg %ptr) {
; CHECK-NEXT: flat_store_dwordx4 v[6:7], v[10:13]
; CHECK-NEXT: s_cbranch_vccz .LBB0_1
; CHECK-NEXT: ; %bb.2: ; %loop-memcpy-residual-header
+; CHECK-NEXT: s_mov_b64 s[4:5], -1
; CHECK-NEXT: s_branch .LBB0_4
; CHECK-NEXT: ; %bb.3:
; CHECK-NEXT: ; implicit-def: $vgpr6_vgpr7
-; CHECK-NEXT: s_branch .LBB0_5
+; CHECK-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; CHECK-NEXT: s_cbranch_vccz .LBB0_5
+; CHECK-NEXT: s_branch .LBB0_8
; CHECK-NEXT: .LBB0_4: ; %loop-memcpy-residual-header.post-loop-memcpy-expansion_crit_edge
; CHECK-NEXT: v_lshlrev_b64 v[6:7], 6, v[2:3]
; CHECK-NEXT: s_cbranch_execnz .LBB0_8
@@ -161,10 +164,13 @@ define void @issue63986_reduced_expanded(i64 %idxprom) {
; CHECK-NEXT: ; %bb.2: ; %loop-memcpy-residual-header
; CHECK-NEXT: s_and_b32 s4, 32, 15
; CHECK-NEXT: s_mov_b32 s5, 0
+; CHECK-NEXT: s_mov_b64 s[6:7], -1
; CHECK-NEXT: s_cbranch_scc0 .LBB1_4
; CHECK-NEXT: ; %bb.3:
; CHECK-NEXT: ; implicit-def: $vgpr0_vgpr1
-; CHECK-NEXT: s_branch .LBB1_5
+; CHECK-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; CHECK-NEXT: s_cbranch_vccz .LBB1_5
+; CHECK-NEXT: s_branch .LBB1_8
; CHECK-NEXT: .LBB1_4: ; %loop-memcpy-residual-header.post-loop-memcpy-expansion_crit_edge
; CHECK-NEXT: v_lshlrev_b64 v[0:1], 1, v[0:1]
; CHECK-NEXT: s_cbranch_execnz .LBB1_8
diff --git a/llvm/test/CodeGen/AMDGPU/mul.ll b/llvm/test/CodeGen/AMDGPU/mul.ll
index 7e3d5c9..c85ae21 100644
--- a/llvm/test/CodeGen/AMDGPU/mul.ll
+++ b/llvm/test/CodeGen/AMDGPU/mul.ll
@@ -2348,6 +2348,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-LABEL: mul32_in_branch:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0xd
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_cbranch_scc0 .LBB15_2
@@ -2356,7 +2357,6 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_mov_b64 s[6:7], 0
; SI-NEXT: s_branch .LBB15_3
; SI-NEXT: .LBB15_2:
-; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: ; implicit-def: $sgpr8
; SI-NEXT: .LBB15_3: ; %Flow
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
@@ -2383,6 +2383,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-LABEL: mul32_in_branch:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u32 s0, 0
; VI-NEXT: s_cbranch_scc0 .LBB15_2
@@ -2391,7 +2392,6 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_mov_b64 s[6:7], 0
; VI-NEXT: s_branch .LBB15_3
; VI-NEXT: .LBB15_2:
-; VI-NEXT: s_mov_b64 s[6:7], -1
; VI-NEXT: ; implicit-def: $sgpr8
; VI-NEXT: .LBB15_3: ; %Flow
; VI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -2418,6 +2418,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-LABEL: mul32_in_branch:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
+; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u32 s0, 0
; GFX9-NEXT: s_cbranch_scc0 .LBB15_2
@@ -2426,7 +2427,6 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: s_mov_b64 s[6:7], 0
; GFX9-NEXT: s_branch .LBB15_3
; GFX9-NEXT: .LBB15_2:
-; GFX9-NEXT: s_mov_b64 s[6:7], -1
; GFX9-NEXT: ; implicit-def: $sgpr8
; GFX9-NEXT: .LBB15_3: ; %Flow
; GFX9-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
@@ -2453,19 +2453,19 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-LABEL: mul32_in_branch:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_load_dwordx2 s[0:1], s[4:5], 0x34
-; GFX10-NEXT: s_mov_b32 s6, 0
+; GFX10-NEXT: s_mov_b32 s7, -1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_cmp_lg_u32 s0, 0
; GFX10-NEXT: s_cbranch_scc0 .LBB15_2
; GFX10-NEXT: ; %bb.1: ; %else
-; GFX10-NEXT: s_mul_i32 s7, s0, s1
+; GFX10-NEXT: s_mul_i32 s6, s0, s1
+; GFX10-NEXT: s_mov_b32 s7, 0
; GFX10-NEXT: s_branch .LBB15_3
; GFX10-NEXT: .LBB15_2:
-; GFX10-NEXT: s_mov_b32 s6, -1
-; GFX10-NEXT: ; implicit-def: $sgpr7
+; GFX10-NEXT: ; implicit-def: $sgpr6
; GFX10-NEXT: .LBB15_3: ; %Flow
; GFX10-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x24
-; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s6
+; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s7
; GFX10-NEXT: s_cbranch_vccnz .LBB15_5
; GFX10-NEXT: ; %bb.4: ; %if
; GFX10-NEXT: s_mov_b32 s7, 0x31016000
@@ -2476,7 +2476,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: buffer_load_dword v0, off, s[4:7], 0
; GFX10-NEXT: s_branch .LBB15_6
; GFX10-NEXT: .LBB15_5:
-; GFX10-NEXT: v_mov_b32_e32 v0, s7
+; GFX10-NEXT: v_mov_b32_e32 v0, s6
; GFX10-NEXT: .LBB15_6: ; %endif
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_mov_b32 s3, 0x31016000
@@ -2488,19 +2488,19 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-LABEL: mul32_in_branch:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
-; GFX11-NEXT: s_mov_b32 s6, 0
+; GFX11-NEXT: s_mov_b32 s7, -1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u32 s0, 0
; GFX11-NEXT: s_cbranch_scc0 .LBB15_2
; GFX11-NEXT: ; %bb.1: ; %else
-; GFX11-NEXT: s_mul_i32 s7, s0, s1
+; GFX11-NEXT: s_mul_i32 s6, s0, s1
+; GFX11-NEXT: s_mov_b32 s7, 0
; GFX11-NEXT: s_branch .LBB15_3
; GFX11-NEXT: .LBB15_2:
-; GFX11-NEXT: s_mov_b32 s6, -1
-; GFX11-NEXT: ; implicit-def: $sgpr7
+; GFX11-NEXT: ; implicit-def: $sgpr6
; GFX11-NEXT: .LBB15_3: ; %Flow
; GFX11-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX11-NEXT: s_cbranch_vccnz .LBB15_5
; GFX11-NEXT: ; %bb.4: ; %if
; GFX11-NEXT: s_mov_b32 s7, 0x31016000
@@ -2511,7 +2511,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: buffer_load_b32 v0, off, s[4:7], 0
; GFX11-NEXT: s_branch .LBB15_6
; GFX11-NEXT: .LBB15_5:
-; GFX11-NEXT: v_mov_b32_e32 v0, s7
+; GFX11-NEXT: v_mov_b32_e32 v0, s6
; GFX11-NEXT: .LBB15_6: ; %endif
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_mov_b32 s3, 0x31016000
@@ -2523,19 +2523,19 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX12-LABEL: mul32_in_branch:
; GFX12: ; %bb.0: ; %entry
; GFX12-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
-; GFX12-NEXT: s_mov_b32 s6, 0
+; GFX12-NEXT: s_mov_b32 s7, -1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_cmp_lg_u32 s0, 0
; GFX12-NEXT: s_cbranch_scc0 .LBB15_2
; GFX12-NEXT: ; %bb.1: ; %else
-; GFX12-NEXT: s_mul_i32 s7, s0, s1
+; GFX12-NEXT: s_mul_i32 s6, s0, s1
+; GFX12-NEXT: s_mov_b32 s7, 0
; GFX12-NEXT: s_branch .LBB15_3
; GFX12-NEXT: .LBB15_2:
-; GFX12-NEXT: s_mov_b32 s6, -1
-; GFX12-NEXT: ; implicit-def: $sgpr7
+; GFX12-NEXT: ; implicit-def: $sgpr6
; GFX12-NEXT: .LBB15_3: ; %Flow
; GFX12-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX12-NEXT: s_cbranch_vccnz .LBB15_5
; GFX12-NEXT: ; %bb.4: ; %if
; GFX12-NEXT: s_mov_b32 s7, 0x31016000
@@ -2546,7 +2546,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX12-NEXT: buffer_load_b32 v0, off, s[4:7], null
; GFX12-NEXT: s_branch .LBB15_6
; GFX12-NEXT: .LBB15_5:
-; GFX12-NEXT: v_mov_b32_e32 v0, s7
+; GFX12-NEXT: v_mov_b32_e32 v0, s6
; GFX12-NEXT: .LBB15_6: ; %endif
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_mov_b32 s3, 0x31016000
@@ -2558,19 +2558,19 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX1250-LABEL: mul32_in_branch:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_load_b64 s[0:1], s[4:5], 0x34
-; GFX1250-NEXT: s_mov_b32 s6, 0
+; GFX1250-NEXT: s_mov_b32 s7, -1
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_cmp_lg_u32 s0, 0
; GFX1250-NEXT: s_cbranch_scc0 .LBB15_2
; GFX1250-NEXT: ; %bb.1: ; %else
-; GFX1250-NEXT: s_mul_i32 s7, s0, s1
+; GFX1250-NEXT: s_mul_i32 s6, s0, s1
+; GFX1250-NEXT: s_mov_b32 s7, 0
; GFX1250-NEXT: s_branch .LBB15_3
; GFX1250-NEXT: .LBB15_2:
-; GFX1250-NEXT: s_mov_b32 s6, -1
-; GFX1250-NEXT: ; implicit-def: $sgpr7
+; GFX1250-NEXT: ; implicit-def: $sgpr6
; GFX1250-NEXT: .LBB15_3: ; %Flow
; GFX1250-NEXT: s_load_b128 s[0:3], s[4:5], 0x24
-; GFX1250-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s6
+; GFX1250-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s7
; GFX1250-NEXT: s_cbranch_vccnz .LBB15_5
; GFX1250-NEXT: ; %bb.4: ; %if
; GFX1250-NEXT: s_mov_b32 s7, 0x31016000
@@ -2581,7 +2581,7 @@ define amdgpu_kernel void @mul32_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX1250-NEXT: buffer_load_b32 v0, off, s[4:7], null
; GFX1250-NEXT: s_branch .LBB15_6
; GFX1250-NEXT: .LBB15_5:
-; GFX1250-NEXT: v_mov_b32_e32 v0, s7
+; GFX1250-NEXT: v_mov_b32_e32 v0, s6
; GFX1250-NEXT: .LBB15_6: ; %endif
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_mov_b32 s3, 0x31016000
@@ -2648,10 +2648,10 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-LABEL: mul64_in_branch:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
-; SI-NEXT: s_and_b64 vcc, exec, s[10:11]
+; SI-NEXT: v_cmp_ne_u64_e64 s[8:9], s[4:5], 0
+; SI-NEXT: s_and_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_vccz .LBB16_4
; SI-NEXT: ; %bb.1: ; %else
; SI-NEXT: v_mov_b32_e32 v0, s6
@@ -2662,8 +2662,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: v_add_i32_e32 v0, vcc, s7, v0
; SI-NEXT: v_add_i32_e32 v1, vcc, s5, v0
; SI-NEXT: v_mov_b32_e32 v0, s4
-; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; SI-NEXT: s_cbranch_vccnz .LBB16_3
+; SI-NEXT: s_cbranch_execnz .LBB16_3
; SI-NEXT: .LBB16_2: ; %if
; SI-NEXT: s_mov_b32 s7, 0xf000
; SI-NEXT: s_mov_b32 s6, -1
@@ -2678,24 +2677,25 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB16_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB16_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB16_2
+; SI-NEXT: s_branch .LBB16_3
;
; VI-LABEL: mul64_in_branch:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u64 s[4:5], 0
; VI-NEXT: s_cbranch_scc0 .LBB16_4
; VI-NEXT: ; %bb.1: ; %else
; VI-NEXT: v_mov_b32_e32 v0, s6
-; VI-NEXT: v_mad_u64_u32 v[0:1], s[10:11], s4, v0, 0
+; VI-NEXT: v_mad_u64_u32 v[0:1], s[8:9], s4, v0, 0
; VI-NEXT: s_mul_i32 s4, s4, s7
; VI-NEXT: v_add_u32_e32 v1, vcc, s4, v1
; VI-NEXT: s_mul_i32 s4, s5, s6
; VI-NEXT: v_add_u32_e32 v1, vcc, s4, v1
-; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; VI-NEXT: s_cbranch_vccnz .LBB16_3
+; VI-NEXT: s_cbranch_execnz .LBB16_3
; VI-NEXT: .LBB16_2: ; %if
; VI-NEXT: s_mov_b32 s7, 0xf000
; VI-NEXT: s_mov_b32 s6, -1
@@ -2710,24 +2710,25 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; VI-NEXT: s_endpgm
; VI-NEXT: .LBB16_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB16_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB16_2
+; VI-NEXT: s_branch .LBB16_3
;
; GFX9-LABEL: mul64_in_branch:
; GFX9: ; %bb.0: ; %entry
; GFX9-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
-; GFX9-NEXT: s_mov_b64 s[0:1], 0
+; GFX9-NEXT: s_mov_b64 s[2:3], -1
; GFX9-NEXT: s_waitcnt lgkmcnt(0)
; GFX9-NEXT: s_cmp_lg_u64 s[12:13], 0
; GFX9-NEXT: s_cbranch_scc0 .LBB16_3
; GFX9-NEXT: ; %bb.1: ; %else
-; GFX9-NEXT: s_mul_i32 s2, s12, s15
-; GFX9-NEXT: s_mul_hi_u32 s3, s12, s14
-; GFX9-NEXT: s_add_i32 s2, s3, s2
-; GFX9-NEXT: s_mul_i32 s3, s13, s14
-; GFX9-NEXT: s_add_i32 s3, s2, s3
-; GFX9-NEXT: s_mul_i32 s2, s12, s14
-; GFX9-NEXT: s_andn2_b64 vcc, exec, s[0:1]
-; GFX9-NEXT: s_cbranch_vccnz .LBB16_4
+; GFX9-NEXT: s_mul_i32 s0, s12, s15
+; GFX9-NEXT: s_mul_hi_u32 s1, s12, s14
+; GFX9-NEXT: s_add_i32 s0, s1, s0
+; GFX9-NEXT: s_mul_i32 s1, s13, s14
+; GFX9-NEXT: s_add_i32 s1, s0, s1
+; GFX9-NEXT: s_mul_i32 s0, s12, s14
+; GFX9-NEXT: s_cbranch_execnz .LBB16_4
; GFX9-NEXT: .LBB16_2: ; %if
; GFX9-NEXT: s_mov_b32 s3, 0xf000
; GFX9-NEXT: s_mov_b32 s2, -1
@@ -2736,11 +2737,12 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX9-NEXT: buffer_load_dwordx2 v[0:1], off, s[0:3], 0
; GFX9-NEXT: s_branch .LBB16_5
; GFX9-NEXT: .LBB16_3:
-; GFX9-NEXT: ; implicit-def: $sgpr2_sgpr3
-; GFX9-NEXT: s_branch .LBB16_2
+; GFX9-NEXT: ; implicit-def: $sgpr0_sgpr1
+; GFX9-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GFX9-NEXT: s_cbranch_vccz .LBB16_2
; GFX9-NEXT: .LBB16_4:
-; GFX9-NEXT: v_mov_b32_e32 v0, s2
-; GFX9-NEXT: v_mov_b32_e32 v1, s3
+; GFX9-NEXT: v_mov_b32_e32 v0, s0
+; GFX9-NEXT: v_mov_b32_e32 v1, s1
; GFX9-NEXT: .LBB16_5: ; %endif
; GFX9-NEXT: s_mov_b32 s11, 0xf000
; GFX9-NEXT: s_mov_b32 s10, -1
@@ -2751,6 +2753,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-LABEL: mul64_in_branch:
; GFX10: ; %bb.0: ; %entry
; GFX10-NEXT: s_load_dwordx8 s[8:15], s[4:5], 0x24
+; GFX10-NEXT: s_mov_b32 s2, -1
; GFX10-NEXT: s_waitcnt lgkmcnt(0)
; GFX10-NEXT: s_cmp_lg_u64 s[12:13], 0
; GFX10-NEXT: s_cbranch_scc0 .LBB16_3
@@ -2771,7 +2774,8 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX10-NEXT: s_branch .LBB16_5
; GFX10-NEXT: .LBB16_3:
; GFX10-NEXT: ; implicit-def: $sgpr0_sgpr1
-; GFX10-NEXT: s_branch .LBB16_2
+; GFX10-NEXT: s_andn2_b32 vcc_lo, exec_lo, s2
+; GFX10-NEXT: s_cbranch_vccz .LBB16_2
; GFX10-NEXT: .LBB16_4:
; GFX10-NEXT: v_mov_b32_e32 v0, s0
; GFX10-NEXT: v_mov_b32_e32 v1, s1
@@ -2785,6 +2789,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-LABEL: mul64_in_branch:
; GFX11: ; %bb.0: ; %entry
; GFX11-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX11-NEXT: s_mov_b32 s8, -1
; GFX11-NEXT: s_waitcnt lgkmcnt(0)
; GFX11-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX11-NEXT: s_cbranch_scc0 .LBB16_3
@@ -2805,7 +2810,8 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX11-NEXT: s_branch .LBB16_5
; GFX11-NEXT: .LBB16_3:
; GFX11-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX11-NEXT: s_branch .LBB16_2
+; GFX11-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX11-NEXT: s_cbranch_vccz .LBB16_2
; GFX11-NEXT: .LBB16_4:
; GFX11-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX11-NEXT: .LBB16_5: ; %endif
@@ -2818,6 +2824,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX12-LABEL: mul64_in_branch:
; GFX12: ; %bb.0: ; %entry
; GFX12-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX12-NEXT: s_mov_b32 s8, -1
; GFX12-NEXT: s_wait_kmcnt 0x0
; GFX12-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX12-NEXT: s_cbranch_scc0 .LBB16_3
@@ -2833,7 +2840,8 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX12-NEXT: s_branch .LBB16_5
; GFX12-NEXT: .LBB16_3:
; GFX12-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX12-NEXT: s_branch .LBB16_2
+; GFX12-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX12-NEXT: s_cbranch_vccz .LBB16_2
; GFX12-NEXT: .LBB16_4:
; GFX12-NEXT: v_dual_mov_b32 v0, s4 :: v_dual_mov_b32 v1, s5
; GFX12-NEXT: .LBB16_5: ; %endif
@@ -2846,6 +2854,7 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX1250-LABEL: mul64_in_branch:
; GFX1250: ; %bb.0: ; %entry
; GFX1250-NEXT: s_load_b256 s[0:7], s[4:5], 0x24
+; GFX1250-NEXT: s_mov_b32 s8, -1
; GFX1250-NEXT: s_wait_kmcnt 0x0
; GFX1250-NEXT: s_cmp_lg_u64 s[4:5], 0
; GFX1250-NEXT: s_cbranch_scc0 .LBB16_3
@@ -2861,7 +2870,8 @@ define amdgpu_kernel void @mul64_in_branch(ptr addrspace(1) %out, ptr addrspace(
; GFX1250-NEXT: s_branch .LBB16_5
; GFX1250-NEXT: .LBB16_3:
; GFX1250-NEXT: ; implicit-def: $sgpr4_sgpr5
-; GFX1250-NEXT: s_branch .LBB16_2
+; GFX1250-NEXT: s_and_not1_b32 vcc_lo, exec_lo, s8
+; GFX1250-NEXT: s_cbranch_vccz .LBB16_2
; GFX1250-NEXT: .LBB16_4:
; GFX1250-NEXT: v_mov_b64_e32 v[0:1], s[4:5]
; GFX1250-NEXT: .LBB16_5: ; %endif
diff --git a/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
index cf244f0..0e55dfa 100644
--- a/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
+++ b/llvm/test/CodeGen/AMDGPU/no-fold-accvgpr-mov.ll
@@ -32,10 +32,13 @@ define amdgpu_kernel void @matmul_kernel(i32 %a0, i32 %a1) {
; GFX942-NEXT: .LBB0_2: ; %bb
; GFX942-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX942-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX942-NEXT: s_mov_b64 s[4:5], -1
; GFX942-NEXT: s_cbranch_vccz .LBB0_1
-; GFX942-NEXT: ; %bb.3:
+; GFX942-NEXT: ; %bb.3: ; in Loop: Header=BB0_2 Depth=1
; GFX942-NEXT: ; implicit-def: $sgpr3
; GFX942-NEXT: ; implicit-def: $agpr2
+; GFX942-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX942-NEXT: s_cbranch_vccnz .LBB0_2
; GFX942-NEXT: .LBB0_4: ; %common.ret
; GFX942-NEXT: s_endpgm
;
@@ -58,7 +61,7 @@ define amdgpu_kernel void @matmul_kernel(i32 %a0, i32 %a1) {
; GFX908-NEXT: s_ashr_i32 s5, s3, 31
; GFX908-NEXT: s_mov_b32 s3, s2
; GFX908-NEXT: v_mov_b32_e32 v1, s2
-; GFX908-NEXT: s_nop 2
+; GFX908-NEXT: s_nop 1
; GFX908-NEXT: v_accvgpr_read_b32 v0, a2
; GFX908-NEXT: v_mov_b32_e32 v2, s3
; GFX908-NEXT: v_accvgpr_read_b32 v4, a1
@@ -72,10 +75,13 @@ define amdgpu_kernel void @matmul_kernel(i32 %a0, i32 %a1) {
; GFX908-NEXT: .LBB0_2: ; %bb
; GFX908-NEXT: ; =>This Inner Loop Header: Depth=1
; GFX908-NEXT: s_and_b64 vcc, exec, s[0:1]
+; GFX908-NEXT: s_mov_b64 s[4:5], -1
; GFX908-NEXT: s_cbranch_vccz .LBB0_1
-; GFX908-NEXT: ; %bb.3:
+; GFX908-NEXT: ; %bb.3: ; in Loop: Header=BB0_2 Depth=1
; GFX908-NEXT: ; implicit-def: $sgpr3
; GFX908-NEXT: ; implicit-def: $agpr2
+; GFX908-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GFX908-NEXT: s_cbranch_vccnz .LBB0_2
; GFX908-NEXT: .LBB0_4: ; %common.ret
; GFX908-NEXT: s_endpgm
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
index 0887f41..3ad7a0f 100644
--- a/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
+++ b/llvm/test/CodeGen/AMDGPU/optimize-negated-cond.ll
@@ -117,6 +117,7 @@ define amdgpu_kernel void @negated_cond_dominated_blocks(ptr addrspace(1) %arg1)
; GCN-NEXT: s_cbranch_scc1 .LBB1_6
; GCN-NEXT: .LBB1_2: ; %bb4
; GCN-NEXT: ; =>This Inner Loop Header: Depth=1
+; GCN-NEXT: s_mov_b64 s[8:9], -1
; GCN-NEXT: s_mov_b64 vcc, s[0:1]
; GCN-NEXT: s_cbranch_vccz .LBB1_4
; GCN-NEXT: ; %bb.3: ; %bb6
@@ -127,7 +128,8 @@ define amdgpu_kernel void @negated_cond_dominated_blocks(ptr addrspace(1) %arg1)
; GCN-NEXT: s_branch .LBB1_5
; GCN-NEXT: .LBB1_4: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: ; implicit-def: $sgpr2
-; GCN-NEXT: s_mov_b64 vcc, 0
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; GCN-NEXT: s_cbranch_vccnz .LBB1_1
; GCN-NEXT: .LBB1_5: ; %bb5
; GCN-NEXT: ; in Loop: Header=BB1_2 Depth=1
; GCN-NEXT: s_lshl_b32 s2, s3, 5
diff --git a/llvm/test/CodeGen/AMDGPU/sdiv64.ll b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
index 4addf42..f4b9160 100644
--- a/llvm/test/CodeGen/AMDGPU/sdiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/sdiv64.ll
@@ -1234,28 +1234,28 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_sub_u32 s2, s2, s4
; GCN-IR-NEXT: s_subb_u32 s3, s3, s4
; GCN-IR-NEXT: s_flbit_i32_b64 s14, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s10, s14, 0xffffffc5
-; GCN-IR-NEXT: s_addc_u32 s11, 0, -1
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[10:11], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[10:11], 63
-; GCN-IR-NEXT: s_or_b64 s[12:13], s[8:9], s[12:13]
-; GCN-IR-NEXT: s_and_b64 s[8:9], s[12:13], exec
-; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24
+; GCN-IR-NEXT: s_add_u32 s8, s14, 0xffffffc5
+; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[2:3], 0
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[12:13], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[16:17], s[8:9], 63
+; GCN-IR-NEXT: s_or_b64 s[12:13], s[10:11], s[12:13]
+; GCN-IR-NEXT: s_and_b64 s[10:11], s[12:13], exec
+; GCN-IR-NEXT: s_cselect_b32 s10, 0, 24
; GCN-IR-NEXT: s_or_b64 s[12:13], s[12:13], s[16:17]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13]
-; GCN-IR-NEXT: s_mov_b32 s9, 0
+; GCN-IR-NEXT: s_mov_b32 s11, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s12, s10, 1
-; GCN-IR-NEXT: s_addc_u32 s13, s11, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[12:13], 0
-; GCN-IR-NEXT: s_sub_i32 s10, 63, s10
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT: s_lshl_b64 s[8:9], 24, s10
+; GCN-IR-NEXT: s_add_u32 s10, s8, 1
+; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[10:11], 0
+; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT: s_lshl_b64 s[8:9], 24, s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[12:13], 24, s12
+; GCN-IR-NEXT: s_lshr_b64 s[12:13], 24, s10
; GCN-IR-NEXT: s_add_u32 s16, s2, -1
; GCN-IR-NEXT: s_addc_u32 s17, s3, -1
; GCN-IR-NEXT: s_sub_u32 s10, 58, s14
@@ -1285,9 +1285,9 @@ define amdgpu_kernel void @s_test_sdiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_cbranch_vccz .LBB10_3
; GCN-IR-NEXT: .LBB10_4: ; %Flow6
; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[8:9], 1
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[6:7], s[2:3]
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[2:3]
; GCN-IR-NEXT: .LBB10_5: ; %udiv-end
-; GCN-IR-NEXT: s_xor_b64 s[6:7], s[8:9], s[4:5]
+; GCN-IR-NEXT: s_xor_b64 s[6:7], s[10:11], s[4:5]
; GCN-IR-NEXT: s_sub_u32 s4, s6, s4
; GCN-IR-NEXT: s_subb_u32 s5, s7, s5
; GCN-IR-NEXT: v_mov_b32_e32 v0, s4
diff --git a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
index 40b6f02..654e345 100644
--- a/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
+++ b/llvm/test/CodeGen/AMDGPU/sgpr-control-flow.ll
@@ -12,12 +12,13 @@ define amdgpu_kernel void @sgpr_if_else_salu_br(ptr addrspace(1) %out, i32 %a, i
; SI-LABEL: sgpr_if_else_salu_br:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0xb
-; SI-NEXT: s_load_dword s6, s[4:5], 0xf
+; SI-NEXT: s_load_dword s8, s[4:5], 0xf
+; SI-NEXT: s_mov_b64 s[6:7], -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s0, 0
; SI-NEXT: s_cbranch_scc0 .LBB0_4
; SI-NEXT: ; %bb.1: ; %else
-; SI-NEXT: s_add_i32 s3, s3, s6
+; SI-NEXT: s_add_i32 s3, s3, s8
; SI-NEXT: s_cbranch_execnz .LBB0_3
; SI-NEXT: .LBB0_2: ; %if
; SI-NEXT: s_sub_i32 s3, s1, s2
@@ -32,7 +33,9 @@ define amdgpu_kernel void @sgpr_if_else_salu_br(ptr addrspace(1) %out, i32 %a, i
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB0_4:
; SI-NEXT: ; implicit-def: $sgpr3
-; SI-NEXT: s_branch .LBB0_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; SI-NEXT: s_cbranch_vccz .LBB0_2
+; SI-NEXT: s_branch .LBB0_3
entry:
%0 = icmp eq i32 %a, 0
@@ -57,6 +60,7 @@ define amdgpu_kernel void @sgpr_if_else_salu_br_opt(ptr addrspace(1) %out, [8 x
; SI-LABEL: sgpr_if_else_salu_br_opt:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dword s2, s[4:5], 0x13
+; SI-NEXT: s_mov_b64 s[0:1], -1
; SI-NEXT: s_waitcnt lgkmcnt(0)
; SI-NEXT: s_cmp_lg_u32 s2, 0
; SI-NEXT: s_cbranch_scc0 .LBB1_4
@@ -82,7 +86,9 @@ define amdgpu_kernel void @sgpr_if_else_salu_br_opt(ptr addrspace(1) %out, [8 x
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB1_4:
; SI-NEXT: ; implicit-def: $sgpr3
-; SI-NEXT: s_branch .LBB1_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; SI-NEXT: s_cbranch_vccz .LBB1_2
+; SI-NEXT: s_branch .LBB1_3
entry:
%cmp0 = icmp eq i32 %a, 0
diff --git a/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll b/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll
index 4cbe682..725a690 100644
--- a/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-unify-exit-multiple-unreachables.ll
@@ -12,64 +12,65 @@ define amdgpu_kernel void @kernel(i32 %a, ptr addrspace(1) %x, i32 noundef %n) {
; CHECK: ; %bb.0: ; %entry
; CHECK-NEXT: s_load_dword s0, s[8:9], 0x10
; CHECK-NEXT: s_load_dword s10, s[8:9], 0x0
+; CHECK-NEXT: s_mov_b64 s[4:5], -1
+; CHECK-NEXT: s_mov_b64 s[2:3], 0
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: s_cmpk_lg_i32 s0, 0x100
-; CHECK-NEXT: s_cbranch_scc0 .LBB0_6
+; CHECK-NEXT: s_cbranch_scc0 .LBB0_5
; CHECK-NEXT: ; %bb.1: ; %if.else
; CHECK-NEXT: v_cmp_gt_u32_e32 vcc, 10, v0
; CHECK-NEXT: s_mov_b64 s[4:5], 0
-; CHECK-NEXT: s_mov_b64 s[2:3], 0
; CHECK-NEXT: s_mov_b64 s[0:1], 0
; CHECK-NEXT: s_and_saveexec_b64 s[6:7], vcc
-; CHECK-NEXT: s_cbranch_execz .LBB0_5
+; CHECK-NEXT: s_cbranch_execz .LBB0_4
; CHECK-NEXT: ; %bb.2: ; %if.then3
; CHECK-NEXT: s_cmp_lg_u32 s10, 0
-; CHECK-NEXT: s_cbranch_scc1 .LBB0_14
-; CHECK-NEXT: ; %bb.3:
; CHECK-NEXT: s_mov_b64 s[0:1], -1
-; CHECK-NEXT: .LBB0_4: ; %Flow3
+; CHECK-NEXT: s_cbranch_scc1 .LBB0_13
+; CHECK-NEXT: .LBB0_3: ; %Flow3
; CHECK-NEXT: s_and_b64 s[0:1], s[0:1], exec
; CHECK-NEXT: s_and_b64 s[2:3], s[2:3], exec
-; CHECK-NEXT: .LBB0_5: ; %Flow2
+; CHECK-NEXT: .LBB0_4: ; %Flow2
; CHECK-NEXT: s_or_b64 exec, exec, s[6:7]
; CHECK-NEXT: s_and_b64 vcc, exec, s[4:5]
-; CHECK-NEXT: s_cbranch_vccz .LBB0_8
-; CHECK-NEXT: s_branch .LBB0_7
-; CHECK-NEXT: .LBB0_6:
-; CHECK-NEXT: s_mov_b64 s[2:3], 0
+; CHECK-NEXT: s_cbranch_vccz .LBB0_7
+; CHECK-NEXT: s_branch .LBB0_6
+; CHECK-NEXT: .LBB0_5:
; CHECK-NEXT: s_mov_b64 s[0:1], 0
-; CHECK-NEXT: s_cbranch_execz .LBB0_8
-; CHECK-NEXT: .LBB0_7: ; %if.then
+; CHECK-NEXT: s_and_b64 vcc, exec, s[4:5]
+; CHECK-NEXT: s_cbranch_vccz .LBB0_7
+; CHECK-NEXT: .LBB0_6: ; %if.then
; CHECK-NEXT: s_cmp_lg_u32 s10, 0
; CHECK-NEXT: s_mov_b64 s[0:1], -1
-; CHECK-NEXT: s_cbranch_scc1 .LBB0_13
-; CHECK-NEXT: .LBB0_8: ; %Flow4
+; CHECK-NEXT: s_cbranch_scc1 .LBB0_12
+; CHECK-NEXT: .LBB0_7: ; %Flow4
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], s[2:3]
-; CHECK-NEXT: .LBB0_9: ; %UnifiedUnreachableBlock
+; CHECK-NEXT: .LBB0_8: ; %UnifiedUnreachableBlock
; CHECK-NEXT: ; divergent unreachable
-; CHECK-NEXT: .LBB0_10: ; %Flow6
+; CHECK-NEXT: .LBB0_9: ; %Flow6
; CHECK-NEXT: s_or_b64 exec, exec, s[4:5]
; CHECK-NEXT: s_and_saveexec_b64 s[2:3], s[0:1]
-; CHECK-NEXT: s_cbranch_execz .LBB0_12
-; CHECK-NEXT: ; %bb.11: ; %if.end6.sink.split
+; CHECK-NEXT: s_cbranch_execz .LBB0_11
+; CHECK-NEXT: ; %bb.10: ; %if.end6.sink.split
; CHECK-NEXT: s_load_dwordx2 s[0:1], s[8:9], 0x8
; CHECK-NEXT: v_lshlrev_b32_e32 v0, 2, v0
; CHECK-NEXT: v_mov_b32_e32 v1, s10
; CHECK-NEXT: s_waitcnt lgkmcnt(0)
; CHECK-NEXT: global_store_dword v0, v1, s[0:1]
-; CHECK-NEXT: .LBB0_12: ; %UnifiedReturnBlock
+; CHECK-NEXT: .LBB0_11: ; %UnifiedReturnBlock
; CHECK-NEXT: s_endpgm
-; CHECK-NEXT: .LBB0_13: ; %cond.false
+; CHECK-NEXT: .LBB0_12: ; %cond.false
; CHECK-NEXT: s_mov_b64 s[0:1], 0
; CHECK-NEXT: s_or_b64 s[2:3], s[2:3], exec
; CHECK-NEXT: s_trap 2
; CHECK-NEXT: s_and_saveexec_b64 s[4:5], s[2:3]
-; CHECK-NEXT: s_cbranch_execnz .LBB0_9
-; CHECK-NEXT: s_branch .LBB0_10
-; CHECK-NEXT: .LBB0_14: ; %cond.false.i8
+; CHECK-NEXT: s_cbranch_execnz .LBB0_8
+; CHECK-NEXT: s_branch .LBB0_9
+; CHECK-NEXT: .LBB0_13: ; %cond.false.i8
; CHECK-NEXT: s_mov_b64 s[2:3], -1
+; CHECK-NEXT: s_mov_b64 s[0:1], 0
; CHECK-NEXT: s_trap 2
-; CHECK-NEXT: s_branch .LBB0_4
+; CHECK-NEXT: s_branch .LBB0_3
entry:
diff --git a/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll b/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll
index d20fef3..227b341 100644
--- a/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll
+++ b/llvm/test/CodeGen/AMDGPU/si-unify-exit-return-unreachable.ll
@@ -1,5 +1,4 @@
; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
-; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=amdgcn-amd-amdhsa %s -o - | FileCheck -check-prefix=GCN %s
; RUN: opt -mtriple=amdgcn-amd-amdhsa -lowerswitch -amdgpu-unify-divergent-exit-nodes -verify -structurizecfg -verify -si-annotate-control-flow -verify -S %s -o - | FileCheck -check-prefix=IR %s
diff --git a/llvm/test/CodeGen/AMDGPU/srem.ll b/llvm/test/CodeGen/AMDGPU/srem.ll
index f614f58..439723d 100644
--- a/llvm/test/CodeGen/AMDGPU/srem.ll
+++ b/llvm/test/CodeGen/AMDGPU/srem.ll
@@ -1495,6 +1495,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[0:3], v0, s[10:11]
+; GCN-NEXT: s_mov_b64 s[10:11], -1
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_readfirstlane_b32 s7, v1
; GCN-NEXT: v_readfirstlane_b32 s6, v0
@@ -1671,7 +1672,9 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; GCN-NEXT: s_endpgm
; GCN-NEXT: .LBB8_4:
; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN-NEXT: s_branch .LBB8_2
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; GCN-NEXT: s_cbranch_vccz .LBB8_2
+; GCN-NEXT: s_branch .LBB8_3
;
; TAHITI-LABEL: srem_i64:
; TAHITI: ; %bb.0:
@@ -1683,6 +1686,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TAHITI-NEXT: s_mov_b32 s0, s6
; TAHITI-NEXT: s_mov_b32 s1, s7
; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0
+; TAHITI-NEXT: s_mov_b64 s[6:7], -1
; TAHITI-NEXT: s_waitcnt vmcnt(0)
; TAHITI-NEXT: v_or_b32_e32 v5, v1, v3
; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
@@ -1832,7 +1836,9 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TAHITI-NEXT: s_endpgm
; TAHITI-NEXT: .LBB8_4:
; TAHITI-NEXT: ; implicit-def: $vgpr3_vgpr4
-; TAHITI-NEXT: s_branch .LBB8_2
+; TAHITI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TAHITI-NEXT: s_cbranch_vccz .LBB8_2
+; TAHITI-NEXT: s_branch .LBB8_3
;
; TONGA-LABEL: srem_i64:
; TONGA: ; %bb.0:
@@ -1842,6 +1848,7 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: v_mov_b32_e32 v0, s6
; TONGA-NEXT: v_mov_b32_e32 v1, s7
; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
+; TONGA-NEXT: s_mov_b64 s[6:7], -1
; TONGA-NEXT: s_waitcnt vmcnt(0)
; TONGA-NEXT: v_or_b32_e32 v5, v1, v3
; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[4:5]
@@ -1979,7 +1986,9 @@ define amdgpu_kernel void @srem_i64(ptr addrspace(1) %out, ptr addrspace(1) %in)
; TONGA-NEXT: s_endpgm
; TONGA-NEXT: .LBB8_4:
; TONGA-NEXT: ; implicit-def: $vgpr3_vgpr4
-; TONGA-NEXT: s_branch .LBB8_2
+; TONGA-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TONGA-NEXT: s_cbranch_vccz .LBB8_2
+; TONGA-NEXT: s_branch .LBB8_3
;
; EG-LABEL: srem_i64:
; EG: ; %bb.0:
@@ -2686,6 +2695,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN: ; %bb.0:
; GCN-NEXT: s_load_dwordx4 s[8:11], s[4:5], 0x24
; GCN-NEXT: v_mov_b32_e32 v8, 0
+; GCN-NEXT: s_mov_b64 s[14:15], -1
; GCN-NEXT: s_waitcnt lgkmcnt(0)
; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[10:11] offset:16
; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[10:11]
@@ -2700,9 +2710,9 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_readfirstlane_b32 s5, v3
; GCN-NEXT: v_readfirstlane_b32 s4, v2
; GCN-NEXT: v_readfirstlane_b32 s7, v7
-; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
; GCN-NEXT: v_readfirstlane_b32 s6, v6
-; GCN-NEXT: s_cbranch_scc0 .LBB10_7
+; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GCN-NEXT: s_cbranch_scc0 .LBB10_8
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_ashr_i32 s0, s11, 31
; GCN-NEXT: s_add_u32 s2, s10, s0
@@ -2868,7 +2878,8 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5]
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT: s_cbranch_scc0 .LBB10_8
+; GCN-NEXT: s_mov_b64 s[10:11], -1
+; GCN-NEXT: s_cbranch_scc0 .LBB10_5
; GCN-NEXT: ; %bb.4:
; GCN-NEXT: s_ashr_i32 s0, s5, 31
; GCN-NEXT: s_add_u32 s2, s4, s0
@@ -2879,6 +2890,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cvt_f32_u32_e32 v3, s13
; GCN-NEXT: s_sub_u32 s0, 0, s12
; GCN-NEXT: s_subb_u32 s1, 0, s13
+; GCN-NEXT: s_mov_b64 s[10:11], 0
; GCN-NEXT: v_madmk_f32 v2, v3, 0x4f800000, v2
; GCN-NEXT: v_rcp_f32_e32 v2, v2
; GCN-NEXT: v_mul_f32_e32 v2, 0x5f7ffffc, v2
@@ -3007,8 +3019,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_mov_b32_e32 v4, s14
; GCN-NEXT: v_subrev_co_u32_e32 v2, vcc, s14, v2
; GCN-NEXT: v_subb_co_u32_e32 v3, vcc, v3, v4, vcc
-; GCN-NEXT: s_cbranch_execnz .LBB10_6
-; GCN-NEXT: .LBB10_5:
+; GCN-NEXT: .LBB10_5: ; %Flow
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; GCN-NEXT: s_cbranch_vccnz .LBB10_7
+; GCN-NEXT: ; %bb.6:
; GCN-NEXT: v_cvt_f32_u32_e32 v2, s4
; GCN-NEXT: s_sub_i32 s0, 0, s4
; GCN-NEXT: v_rcp_iflag_f32_e32 v2, v2
@@ -3027,15 +3041,15 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cmp_le_u32_e32 vcc, s4, v2
; GCN-NEXT: v_cndmask_b32_e32 v2, v2, v3, vcc
; GCN-NEXT: v_mov_b32_e32 v3, 0
-; GCN-NEXT: .LBB10_6:
+; GCN-NEXT: .LBB10_7:
; GCN-NEXT: v_mov_b32_e32 v4, 0
; GCN-NEXT: global_store_dwordx4 v4, v[0:3], s[8:9]
; GCN-NEXT: s_endpgm
-; GCN-NEXT: .LBB10_7:
-; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GCN-NEXT: s_branch .LBB10_2
; GCN-NEXT: .LBB10_8:
-; GCN-NEXT: s_branch .LBB10_5
+; GCN-NEXT: ; implicit-def: $vgpr0_vgpr1
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GCN-NEXT: s_cbranch_vccz .LBB10_2
+; GCN-NEXT: s_branch .LBB10_3
;
; TAHITI-LABEL: srem_v2i64:
; TAHITI: ; %bb.0:
@@ -3048,10 +3062,11 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: s_mov_b32 s1, s7
; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0
; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:16
+; TAHITI-NEXT: s_mov_b64 s[6:7], -1
; TAHITI-NEXT: s_waitcnt vmcnt(0)
; TAHITI-NEXT: v_or_b32_e32 v9, v5, v1
; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; TAHITI-NEXT: s_cbranch_vccz .LBB10_7
+; TAHITI-NEXT: s_cbranch_vccz .LBB10_8
; TAHITI-NEXT: ; %bb.1:
; TAHITI-NEXT: v_ashrrev_i32_e32 v9, 31, v1
; TAHITI-NEXT: v_add_i32_e32 v8, vcc, v0, v9
@@ -3194,7 +3209,8 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_or_b32_e32 v1, v7, v3
; TAHITI-NEXT: v_mov_b32_e32 v0, 0
; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; TAHITI-NEXT: s_cbranch_vccz .LBB10_8
+; TAHITI-NEXT: s_mov_b64 s[6:7], -1
+; TAHITI-NEXT: s_cbranch_vccz .LBB10_5
; TAHITI-NEXT: ; %bb.4:
; TAHITI-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v2, v0
@@ -3207,6 +3223,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_subb_u32_e32 v10, vcc, 0, v0, vcc
; TAHITI-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3
; TAHITI-NEXT: v_rcp_f32_e32 v3, v3
+; TAHITI-NEXT: s_mov_b64 s[6:7], 0
; TAHITI-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3
; TAHITI-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3
; TAHITI-NEXT: v_trunc_f32_e32 v4, v4
@@ -3313,8 +3330,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_xor_b32_e32 v0, v0, v5
; TAHITI-NEXT: v_sub_i32_e32 v10, vcc, v1, v5
; TAHITI-NEXT: v_subb_u32_e32 v11, vcc, v0, v5, vcc
-; TAHITI-NEXT: s_cbranch_execnz .LBB10_6
-; TAHITI-NEXT: .LBB10_5:
+; TAHITI-NEXT: .LBB10_5: ; %Flow
+; TAHITI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TAHITI-NEXT: s_cbranch_vccnz .LBB10_7
+; TAHITI-NEXT: ; %bb.6:
; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, v2
; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
; TAHITI-NEXT: v_mov_b32_e32 v11, 0
@@ -3333,16 +3352,16 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; TAHITI-NEXT: v_cndmask_b32_e32 v10, v0, v1, vcc
-; TAHITI-NEXT: .LBB10_6:
+; TAHITI-NEXT: .LBB10_7:
; TAHITI-NEXT: s_mov_b32 s7, 0xf000
; TAHITI-NEXT: s_mov_b32 s6, -1
; TAHITI-NEXT: buffer_store_dwordx4 v[8:11], off, s[4:7], 0
; TAHITI-NEXT: s_endpgm
-; TAHITI-NEXT: .LBB10_7:
-; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
-; TAHITI-NEXT: s_branch .LBB10_2
; TAHITI-NEXT: .LBB10_8:
-; TAHITI-NEXT: s_branch .LBB10_5
+; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TAHITI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TAHITI-NEXT: s_cbranch_vccz .LBB10_2
+; TAHITI-NEXT: s_branch .LBB10_3
;
; TONGA-LABEL: srem_v2i64:
; TONGA: ; %bb.0:
@@ -3357,10 +3376,11 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_mov_b32_e32 v1, s1
; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT: s_mov_b64 s[6:7], -1
; TONGA-NEXT: s_waitcnt vmcnt(0)
; TONGA-NEXT: v_or_b32_e32 v9, v5, v1
; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; TONGA-NEXT: s_cbranch_vccz .LBB10_7
+; TONGA-NEXT: s_cbranch_vccz .LBB10_8
; TONGA-NEXT: ; %bb.1:
; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v1
; TONGA-NEXT: v_add_u32_e32 v9, vcc, v0, v8
@@ -3491,7 +3511,8 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_or_b32_e32 v1, v7, v3
; TONGA-NEXT: v_mov_b32_e32 v0, 0
; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; TONGA-NEXT: s_cbranch_vccz .LBB10_8
+; TONGA-NEXT: s_mov_b64 s[6:7], -1
+; TONGA-NEXT: s_cbranch_vccz .LBB10_5
; TONGA-NEXT: ; %bb.4:
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0
@@ -3504,6 +3525,7 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subb_u32_e32 v14, vcc, 0, v12, vcc
; TONGA-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; TONGA-NEXT: v_rcp_f32_e32 v0, v0
+; TONGA-NEXT: s_mov_b64 s[6:7], 0
; TONGA-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; TONGA-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; TONGA-NEXT: v_trunc_f32_e32 v1, v1
@@ -3598,8 +3620,10 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v11
; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v0, v11
; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v1, v11, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB10_6
-; TONGA-NEXT: .LBB10_5:
+; TONGA-NEXT: .LBB10_5: ; %Flow
+; TONGA-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TONGA-NEXT: s_cbranch_vccnz .LBB10_7
+; TONGA-NEXT: ; %bb.6:
; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2
; TONGA-NEXT: v_mov_b32_e32 v11, 0
@@ -3618,16 +3642,16 @@ define amdgpu_kernel void @srem_v2i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; TONGA-NEXT: v_cndmask_b32_e32 v10, v0, v1, vcc
-; TONGA-NEXT: .LBB10_6:
+; TONGA-NEXT: .LBB10_7:
; TONGA-NEXT: v_mov_b32_e32 v0, s4
; TONGA-NEXT: v_mov_b32_e32 v1, s5
; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[8:11]
; TONGA-NEXT: s_endpgm
-; TONGA-NEXT: .LBB10_7:
-; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9
-; TONGA-NEXT: s_branch .LBB10_2
; TONGA-NEXT: .LBB10_8:
-; TONGA-NEXT: s_branch .LBB10_5
+; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TONGA-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TONGA-NEXT: s_cbranch_vccz .LBB10_2
+; TONGA-NEXT: s_branch .LBB10_3
;
; EG-LABEL: srem_v2i64:
; EG: ; %bb.0:
@@ -4867,6 +4891,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: global_load_dwordx4 v[14:17], v8, s[10:11]
; GCN-NEXT: global_load_dwordx4 v[0:3], v8, s[10:11] offset:48
; GCN-NEXT: global_load_dwordx4 v[4:7], v8, s[10:11] offset:16
+; GCN-NEXT: s_mov_b64 s[10:11], -1
; GCN-NEXT: s_waitcnt vmcnt(3)
; GCN-NEXT: v_readfirstlane_b32 s5, v11
; GCN-NEXT: v_readfirstlane_b32 s4, v10
@@ -4876,7 +4901,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5]
; GCN-NEXT: s_mov_b32 s0, 0
; GCN-NEXT: s_cmp_lg_u64 s[0:1], 0
-; GCN-NEXT: s_cbranch_scc0 .LBB12_13
+; GCN-NEXT: s_cbranch_scc0 .LBB12_15
; GCN-NEXT: ; %bb.1:
; GCN-NEXT: s_ashr_i32 s0, s5, 31
; GCN-NEXT: s_add_u32 s2, s4, s0
@@ -5042,7 +5067,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_or_b32_e32 v11, v17, v13
; GCN-NEXT: v_mov_b32_e32 v10, 0
; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
-; GCN-NEXT: s_cbranch_vccz .LBB12_14
+; GCN-NEXT: s_mov_b64 s[4:5], -1
+; GCN-NEXT: s_cbranch_vccz .LBB12_5
; GCN-NEXT: ; %bb.4:
; GCN-NEXT: v_ashrrev_i32_e32 v10, 31, v13
; GCN-NEXT: v_add_co_u32_e32 v11, vcc, v12, v10
@@ -5055,6 +5081,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_subb_co_u32_e32 v18, vcc, 0, v10, vcc
; GCN-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13
; GCN-NEXT: v_rcp_f32_e32 v13, v13
+; GCN-NEXT: s_mov_b64 s[4:5], 0
; GCN-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13
; GCN-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13
; GCN-NEXT: v_trunc_f32_e32 v14, v14
@@ -5161,8 +5188,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_xor_b32_e32 v13, v10, v15
; GCN-NEXT: v_sub_co_u32_e32 v10, vcc, v11, v15
; GCN-NEXT: v_subb_co_u32_e32 v11, vcc, v13, v15, vcc
-; GCN-NEXT: s_cbranch_execnz .LBB12_6
-; GCN-NEXT: .LBB12_5:
+; GCN-NEXT: .LBB12_5: ; %Flow6
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN-NEXT: s_cbranch_vccnz .LBB12_7
+; GCN-NEXT: ; %bb.6:
; GCN-NEXT: v_cvt_f32_u32_e32 v10, v12
; GCN-NEXT: v_sub_u32_e32 v11, 0, v12
; GCN-NEXT: v_rcp_iflag_f32_e32 v10, v10
@@ -5181,13 +5210,14 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
; GCN-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
; GCN-NEXT: v_mov_b32_e32 v11, 0
-; GCN-NEXT: .LBB12_6:
+; GCN-NEXT: .LBB12_7:
; GCN-NEXT: s_waitcnt vmcnt(0)
; GCN-NEXT: v_or_b32_e32 v13, v5, v1
; GCN-NEXT: v_mov_b32_e32 v12, 0
; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; GCN-NEXT: s_cbranch_vccz .LBB12_15
-; GCN-NEXT: ; %bb.7:
+; GCN-NEXT: s_mov_b64 s[4:5], -1
+; GCN-NEXT: s_cbranch_vccz .LBB12_16
+; GCN-NEXT: ; %bb.8:
; GCN-NEXT: v_ashrrev_i32_e32 v13, 31, v1
; GCN-NEXT: v_add_co_u32_e32 v12, vcc, v0, v13
; GCN-NEXT: v_addc_co_u32_e32 v1, vcc, v1, v13, vcc
@@ -5305,8 +5335,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_xor_b32_e32 v1, v1, v15
; GCN-NEXT: v_sub_co_u32_e32 v12, vcc, v5, v15
; GCN-NEXT: v_subb_co_u32_e32 v13, vcc, v1, v15, vcc
-; GCN-NEXT: s_cbranch_execnz .LBB12_9
-; GCN-NEXT: .LBB12_8:
+; GCN-NEXT: s_cbranch_execnz .LBB12_10
+; GCN-NEXT: .LBB12_9:
; GCN-NEXT: v_cvt_f32_u32_e32 v1, v0
; GCN-NEXT: v_sub_u32_e32 v5, 0, v0
; GCN-NEXT: v_mov_b32_e32 v13, 0
@@ -5325,12 +5355,13 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_sub_u32_e32 v4, v1, v0
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
; GCN-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc
-; GCN-NEXT: .LBB12_9:
+; GCN-NEXT: .LBB12_10:
; GCN-NEXT: v_or_b32_e32 v1, v7, v3
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; GCN-NEXT: s_cbranch_vccz .LBB12_16
-; GCN-NEXT: ; %bb.10:
+; GCN-NEXT: s_mov_b64 s[4:5], -1
+; GCN-NEXT: s_cbranch_vccz .LBB12_12
+; GCN-NEXT: ; %bb.11:
; GCN-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; GCN-NEXT: v_add_co_u32_e32 v1, vcc, v2, v0
; GCN-NEXT: v_addc_co_u32_e32 v3, vcc, v3, v0, vcc
@@ -5342,6 +5373,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_subb_co_u32_e32 v14, vcc, 0, v0, vcc
; GCN-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3
; GCN-NEXT: v_rcp_f32_e32 v3, v3
+; GCN-NEXT: s_mov_b64 s[4:5], 0
; GCN-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3
; GCN-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3
; GCN-NEXT: v_trunc_f32_e32 v4, v4
@@ -5448,8 +5480,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_xor_b32_e32 v0, v0, v5
; GCN-NEXT: v_sub_co_u32_e32 v14, vcc, v1, v5
; GCN-NEXT: v_subb_co_u32_e32 v15, vcc, v0, v5, vcc
-; GCN-NEXT: s_cbranch_execnz .LBB12_12
-; GCN-NEXT: .LBB12_11:
+; GCN-NEXT: .LBB12_12: ; %Flow
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN-NEXT: s_cbranch_vccnz .LBB12_14
+; GCN-NEXT: ; %bb.13:
; GCN-NEXT: v_cvt_f32_u32_e32 v0, v2
; GCN-NEXT: v_sub_u32_e32 v1, 0, v2
; GCN-NEXT: v_mov_b32_e32 v15, 0
@@ -5468,21 +5502,21 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; GCN-NEXT: v_sub_u32_e32 v1, v0, v2
; GCN-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; GCN-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc
-; GCN-NEXT: .LBB12_12:
+; GCN-NEXT: .LBB12_14:
; GCN-NEXT: v_mov_b32_e32 v0, 0
; GCN-NEXT: global_store_dwordx4 v0, v[12:15], s[8:9] offset:16
; GCN-NEXT: global_store_dwordx4 v0, v[8:11], s[8:9]
; GCN-NEXT: s_endpgm
-; GCN-NEXT: .LBB12_13:
-; GCN-NEXT: ; implicit-def: $vgpr8_vgpr9
-; GCN-NEXT: s_branch .LBB12_2
-; GCN-NEXT: .LBB12_14:
-; GCN-NEXT: s_branch .LBB12_5
; GCN-NEXT: .LBB12_15:
-; GCN-NEXT: ; implicit-def: $vgpr12_vgpr13
-; GCN-NEXT: s_branch .LBB12_8
+; GCN-NEXT: ; implicit-def: $vgpr8_vgpr9
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; GCN-NEXT: s_cbranch_vccz .LBB12_2
+; GCN-NEXT: s_branch .LBB12_3
; GCN-NEXT: .LBB12_16:
-; GCN-NEXT: s_branch .LBB12_11
+; GCN-NEXT: ; implicit-def: $vgpr12_vgpr13
+; GCN-NEXT: s_andn2_b64 vcc, exec, s[4:5]
+; GCN-NEXT: s_cbranch_vccz .LBB12_9
+; GCN-NEXT: s_branch .LBB12_10
;
; TAHITI-LABEL: srem_v4i64:
; TAHITI: ; %bb.0:
@@ -5497,10 +5531,11 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: buffer_load_dwordx4 v[14:17], off, s[0:3], 0
; TAHITI-NEXT: buffer_load_dwordx4 v[4:7], off, s[0:3], 0 offset:16
; TAHITI-NEXT: buffer_load_dwordx4 v[0:3], off, s[0:3], 0 offset:48
+; TAHITI-NEXT: s_mov_b64 s[6:7], -1
; TAHITI-NEXT: s_waitcnt vmcnt(2)
; TAHITI-NEXT: v_or_b32_e32 v9, v15, v11
; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_13
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_15
; TAHITI-NEXT: ; %bb.1:
; TAHITI-NEXT: v_ashrrev_i32_e32 v8, 31, v11
; TAHITI-NEXT: v_add_i32_e32 v9, vcc, v10, v8
@@ -5643,7 +5678,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_or_b32_e32 v11, v17, v13
; TAHITI-NEXT: v_mov_b32_e32 v10, 0
; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_14
+; TAHITI-NEXT: s_mov_b64 s[6:7], -1
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_5
; TAHITI-NEXT: ; %bb.4:
; TAHITI-NEXT: v_ashrrev_i32_e32 v10, 31, v13
; TAHITI-NEXT: v_add_i32_e32 v11, vcc, v12, v10
@@ -5656,6 +5692,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_subb_u32_e32 v18, vcc, 0, v10, vcc
; TAHITI-NEXT: v_madmk_f32 v13, v14, 0x4f800000, v13
; TAHITI-NEXT: v_rcp_f32_e32 v13, v13
+; TAHITI-NEXT: s_mov_b64 s[6:7], 0
; TAHITI-NEXT: v_mul_f32_e32 v13, 0x5f7ffffc, v13
; TAHITI-NEXT: v_mul_f32_e32 v14, 0x2f800000, v13
; TAHITI-NEXT: v_trunc_f32_e32 v14, v14
@@ -5762,8 +5799,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_xor_b32_e32 v13, v10, v15
; TAHITI-NEXT: v_sub_i32_e32 v10, vcc, v11, v15
; TAHITI-NEXT: v_subb_u32_e32 v11, vcc, v13, v15, vcc
-; TAHITI-NEXT: s_cbranch_execnz .LBB12_6
-; TAHITI-NEXT: .LBB12_5:
+; TAHITI-NEXT: .LBB12_5: ; %Flow6
+; TAHITI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TAHITI-NEXT: s_cbranch_vccnz .LBB12_7
+; TAHITI-NEXT: ; %bb.6:
; TAHITI-NEXT: v_cvt_f32_u32_e32 v10, v12
; TAHITI-NEXT: v_sub_i32_e32 v11, vcc, 0, v12
; TAHITI-NEXT: v_rcp_iflag_f32_e32 v10, v10
@@ -5782,13 +5821,14 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
; TAHITI-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
; TAHITI-NEXT: v_mov_b32_e32 v11, 0
-; TAHITI-NEXT: .LBB12_6:
+; TAHITI-NEXT: .LBB12_7:
; TAHITI-NEXT: s_waitcnt vmcnt(0)
; TAHITI-NEXT: v_or_b32_e32 v13, v5, v1
; TAHITI-NEXT: v_mov_b32_e32 v12, 0
; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_15
-; TAHITI-NEXT: ; %bb.7:
+; TAHITI-NEXT: s_mov_b64 s[6:7], -1
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_16
+; TAHITI-NEXT: ; %bb.8:
; TAHITI-NEXT: v_ashrrev_i32_e32 v13, 31, v1
; TAHITI-NEXT: v_add_i32_e32 v12, vcc, v0, v13
; TAHITI-NEXT: v_addc_u32_e32 v1, vcc, v1, v13, vcc
@@ -5906,8 +5946,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_xor_b32_e32 v1, v1, v15
; TAHITI-NEXT: v_sub_i32_e32 v12, vcc, v5, v15
; TAHITI-NEXT: v_subb_u32_e32 v13, vcc, v1, v15, vcc
-; TAHITI-NEXT: s_cbranch_execnz .LBB12_9
-; TAHITI-NEXT: .LBB12_8:
+; TAHITI-NEXT: s_cbranch_execnz .LBB12_10
+; TAHITI-NEXT: .LBB12_9:
; TAHITI-NEXT: v_cvt_f32_u32_e32 v1, v0
; TAHITI-NEXT: v_sub_i32_e32 v5, vcc, 0, v0
; TAHITI-NEXT: v_mov_b32_e32 v13, 0
@@ -5926,12 +5966,13 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_subrev_i32_e32 v4, vcc, v0, v1
; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
; TAHITI-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc
-; TAHITI-NEXT: .LBB12_9:
+; TAHITI-NEXT: .LBB12_10:
; TAHITI-NEXT: v_or_b32_e32 v1, v7, v3
; TAHITI-NEXT: v_mov_b32_e32 v0, 0
; TAHITI-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; TAHITI-NEXT: s_cbranch_vccz .LBB12_16
-; TAHITI-NEXT: ; %bb.10:
+; TAHITI-NEXT: s_mov_b64 s[6:7], -1
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_12
+; TAHITI-NEXT: ; %bb.11:
; TAHITI-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TAHITI-NEXT: v_add_i32_e32 v1, vcc, v2, v0
; TAHITI-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
@@ -5943,6 +5984,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_subb_u32_e32 v14, vcc, 0, v0, vcc
; TAHITI-NEXT: v_madmk_f32 v3, v4, 0x4f800000, v3
; TAHITI-NEXT: v_rcp_f32_e32 v3, v3
+; TAHITI-NEXT: s_mov_b64 s[6:7], 0
; TAHITI-NEXT: v_mul_f32_e32 v3, 0x5f7ffffc, v3
; TAHITI-NEXT: v_mul_f32_e32 v4, 0x2f800000, v3
; TAHITI-NEXT: v_trunc_f32_e32 v4, v4
@@ -6049,8 +6091,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_xor_b32_e32 v0, v0, v5
; TAHITI-NEXT: v_sub_i32_e32 v14, vcc, v1, v5
; TAHITI-NEXT: v_subb_u32_e32 v15, vcc, v0, v5, vcc
-; TAHITI-NEXT: s_cbranch_execnz .LBB12_12
-; TAHITI-NEXT: .LBB12_11:
+; TAHITI-NEXT: .LBB12_12: ; %Flow
+; TAHITI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TAHITI-NEXT: s_cbranch_vccnz .LBB12_14
+; TAHITI-NEXT: ; %bb.13:
; TAHITI-NEXT: v_cvt_f32_u32_e32 v0, v2
; TAHITI-NEXT: v_sub_i32_e32 v1, vcc, 0, v2
; TAHITI-NEXT: v_mov_b32_e32 v15, 0
@@ -6069,22 +6113,22 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TAHITI-NEXT: v_subrev_i32_e32 v1, vcc, v2, v0
; TAHITI-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; TAHITI-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc
-; TAHITI-NEXT: .LBB12_12:
+; TAHITI-NEXT: .LBB12_14:
; TAHITI-NEXT: s_mov_b32 s7, 0xf000
; TAHITI-NEXT: s_mov_b32 s6, -1
; TAHITI-NEXT: buffer_store_dwordx4 v[12:15], off, s[4:7], 0 offset:16
; TAHITI-NEXT: buffer_store_dwordx4 v[8:11], off, s[4:7], 0
; TAHITI-NEXT: s_endpgm
-; TAHITI-NEXT: .LBB12_13:
-; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
-; TAHITI-NEXT: s_branch .LBB12_2
-; TAHITI-NEXT: .LBB12_14:
-; TAHITI-NEXT: s_branch .LBB12_5
; TAHITI-NEXT: .LBB12_15:
-; TAHITI-NEXT: ; implicit-def: $vgpr12_vgpr13
-; TAHITI-NEXT: s_branch .LBB12_8
+; TAHITI-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TAHITI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_2
+; TAHITI-NEXT: s_branch .LBB12_3
; TAHITI-NEXT: .LBB12_16:
-; TAHITI-NEXT: s_branch .LBB12_11
+; TAHITI-NEXT: ; implicit-def: $vgpr12_vgpr13
+; TAHITI-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TAHITI-NEXT: s_cbranch_vccz .LBB12_9
+; TAHITI-NEXT: s_branch .LBB12_10
;
; TONGA-LABEL: srem_v4i64:
; TONGA: ; %bb.0:
@@ -6109,10 +6153,11 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_mov_b32_e32 v4, s0
; TONGA-NEXT: flat_load_dwordx4 v[0:3], v[0:1]
; TONGA-NEXT: flat_load_dwordx4 v[4:7], v[4:5]
+; TONGA-NEXT: s_mov_b64 s[6:7], -1
; TONGA-NEXT: s_waitcnt vmcnt(2)
; TONGA-NEXT: v_or_b32_e32 v9, v15, v11
; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[8:9]
-; TONGA-NEXT: s_cbranch_vccz .LBB12_13
+; TONGA-NEXT: s_cbranch_vccz .LBB12_15
; TONGA-NEXT: ; %bb.1:
; TONGA-NEXT: v_ashrrev_i32_e32 v8, 31, v11
; TONGA-NEXT: v_add_u32_e32 v9, vcc, v10, v8
@@ -6243,7 +6288,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_or_b32_e32 v11, v17, v13
; TONGA-NEXT: v_mov_b32_e32 v10, 0
; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[10:11]
-; TONGA-NEXT: s_cbranch_vccz .LBB12_14
+; TONGA-NEXT: s_mov_b64 s[6:7], -1
+; TONGA-NEXT: s_cbranch_vccz .LBB12_5
; TONGA-NEXT: ; %bb.4:
; TONGA-NEXT: v_ashrrev_i32_e32 v10, 31, v13
; TONGA-NEXT: v_add_u32_e32 v11, vcc, v12, v10
@@ -6256,6 +6302,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subb_u32_e32 v22, vcc, 0, v20, vcc
; TONGA-NEXT: v_madmk_f32 v10, v11, 0x4f800000, v10
; TONGA-NEXT: v_rcp_f32_e32 v10, v10
+; TONGA-NEXT: s_mov_b64 s[6:7], 0
; TONGA-NEXT: v_mul_f32_e32 v10, 0x5f7ffffc, v10
; TONGA-NEXT: v_mul_f32_e32 v11, 0x2f800000, v10
; TONGA-NEXT: v_trunc_f32_e32 v11, v11
@@ -6350,8 +6397,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v11, v11, v18
; TONGA-NEXT: v_sub_u32_e32 v10, vcc, v10, v18
; TONGA-NEXT: v_subb_u32_e32 v11, vcc, v11, v18, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB12_6
-; TONGA-NEXT: .LBB12_5:
+; TONGA-NEXT: .LBB12_5: ; %Flow6
+; TONGA-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TONGA-NEXT: s_cbranch_vccnz .LBB12_7
+; TONGA-NEXT: ; %bb.6:
; TONGA-NEXT: v_cvt_f32_u32_e32 v10, v12
; TONGA-NEXT: v_sub_u32_e32 v11, vcc, 0, v12
; TONGA-NEXT: v_rcp_iflag_f32_e32 v10, v10
@@ -6370,13 +6419,14 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v10, v12
; TONGA-NEXT: v_cndmask_b32_e32 v10, v10, v11, vcc
; TONGA-NEXT: v_mov_b32_e32 v11, 0
-; TONGA-NEXT: .LBB12_6:
+; TONGA-NEXT: .LBB12_7:
; TONGA-NEXT: s_waitcnt vmcnt(0)
; TONGA-NEXT: v_or_b32_e32 v13, v5, v1
; TONGA-NEXT: v_mov_b32_e32 v12, 0
; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[12:13]
-; TONGA-NEXT: s_cbranch_vccz .LBB12_15
-; TONGA-NEXT: ; %bb.7:
+; TONGA-NEXT: s_mov_b64 s[6:7], -1
+; TONGA-NEXT: s_cbranch_vccz .LBB12_16
+; TONGA-NEXT: ; %bb.8:
; TONGA-NEXT: v_ashrrev_i32_e32 v12, 31, v1
; TONGA-NEXT: v_add_u32_e32 v13, vcc, v0, v12
; TONGA-NEXT: v_addc_u32_e32 v1, vcc, v1, v12, vcc
@@ -6482,8 +6532,8 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v16
; TONGA-NEXT: v_sub_u32_e32 v12, vcc, v5, v16
; TONGA-NEXT: v_subb_u32_e32 v13, vcc, v1, v16, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB12_9
-; TONGA-NEXT: .LBB12_8:
+; TONGA-NEXT: s_cbranch_execnz .LBB12_10
+; TONGA-NEXT: .LBB12_9:
; TONGA-NEXT: v_cvt_f32_u32_e32 v1, v0
; TONGA-NEXT: v_sub_u32_e32 v5, vcc, 0, v0
; TONGA-NEXT: v_mov_b32_e32 v13, 0
@@ -6502,12 +6552,13 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subrev_u32_e32 v4, vcc, v0, v1
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v1, v0
; TONGA-NEXT: v_cndmask_b32_e32 v12, v1, v4, vcc
-; TONGA-NEXT: .LBB12_9:
+; TONGA-NEXT: .LBB12_10:
; TONGA-NEXT: v_or_b32_e32 v1, v7, v3
; TONGA-NEXT: v_mov_b32_e32 v0, 0
; TONGA-NEXT: v_cmp_ne_u64_e32 vcc, 0, v[0:1]
-; TONGA-NEXT: s_cbranch_vccz .LBB12_16
-; TONGA-NEXT: ; %bb.10:
+; TONGA-NEXT: s_mov_b64 s[6:7], -1
+; TONGA-NEXT: s_cbranch_vccz .LBB12_12
+; TONGA-NEXT: ; %bb.11:
; TONGA-NEXT: v_ashrrev_i32_e32 v0, 31, v3
; TONGA-NEXT: v_add_u32_e32 v1, vcc, v2, v0
; TONGA-NEXT: v_addc_u32_e32 v3, vcc, v3, v0, vcc
@@ -6519,6 +6570,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subb_u32_e32 v18, vcc, 0, v16, vcc
; TONGA-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; TONGA-NEXT: v_rcp_f32_e32 v0, v0
+; TONGA-NEXT: s_mov_b64 s[6:7], 0
; TONGA-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
; TONGA-NEXT: v_mul_f32_e32 v1, 0x2f800000, v0
; TONGA-NEXT: v_trunc_f32_e32 v1, v1
@@ -6613,8 +6665,10 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_xor_b32_e32 v1, v1, v15
; TONGA-NEXT: v_sub_u32_e32 v14, vcc, v0, v15
; TONGA-NEXT: v_subb_u32_e32 v15, vcc, v1, v15, vcc
-; TONGA-NEXT: s_cbranch_execnz .LBB12_12
-; TONGA-NEXT: .LBB12_11:
+; TONGA-NEXT: .LBB12_12: ; %Flow
+; TONGA-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TONGA-NEXT: s_cbranch_vccnz .LBB12_14
+; TONGA-NEXT: ; %bb.13:
; TONGA-NEXT: v_cvt_f32_u32_e32 v0, v2
; TONGA-NEXT: v_sub_u32_e32 v1, vcc, 0, v2
; TONGA-NEXT: v_mov_b32_e32 v15, 0
@@ -6633,7 +6687,7 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_subrev_u32_e32 v1, vcc, v2, v0
; TONGA-NEXT: v_cmp_ge_u32_e32 vcc, v0, v2
; TONGA-NEXT: v_cndmask_b32_e32 v14, v0, v1, vcc
-; TONGA-NEXT: .LBB12_12:
+; TONGA-NEXT: .LBB12_14:
; TONGA-NEXT: v_mov_b32_e32 v0, s4
; TONGA-NEXT: v_mov_b32_e32 v1, s5
; TONGA-NEXT: s_add_u32 s0, s4, 16
@@ -6643,16 +6697,16 @@ define amdgpu_kernel void @srem_v4i64(ptr addrspace(1) %out, ptr addrspace(1) %i
; TONGA-NEXT: v_mov_b32_e32 v1, s1
; TONGA-NEXT: flat_store_dwordx4 v[0:1], v[12:15]
; TONGA-NEXT: s_endpgm
-; TONGA-NEXT: .LBB12_13:
-; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9
-; TONGA-NEXT: s_branch .LBB12_2
-; TONGA-NEXT: .LBB12_14:
-; TONGA-NEXT: s_branch .LBB12_5
; TONGA-NEXT: .LBB12_15:
-; TONGA-NEXT: ; implicit-def: $vgpr12_vgpr13
-; TONGA-NEXT: s_branch .LBB12_8
+; TONGA-NEXT: ; implicit-def: $vgpr8_vgpr9
+; TONGA-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TONGA-NEXT: s_cbranch_vccz .LBB12_2
+; TONGA-NEXT: s_branch .LBB12_3
; TONGA-NEXT: .LBB12_16:
-; TONGA-NEXT: s_branch .LBB12_11
+; TONGA-NEXT: ; implicit-def: $vgpr12_vgpr13
+; TONGA-NEXT: s_andn2_b64 vcc, exec, s[6:7]
+; TONGA-NEXT: s_cbranch_vccz .LBB12_9
+; TONGA-NEXT: s_branch .LBB12_10
;
; EG-LABEL: srem_v4i64:
; EG: ; %bb.0:
diff --git a/llvm/test/CodeGen/AMDGPU/udiv64.ll b/llvm/test/CodeGen/AMDGPU/udiv64.ll
index bc9a3f2..a0d8386 100644
--- a/llvm/test/CodeGen/AMDGPU/udiv64.ll
+++ b/llvm/test/CodeGen/AMDGPU/udiv64.ll
@@ -132,28 +132,28 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[2:3], 0
; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[6:7]
; GCN-IR-NEXT: s_flbit_i32_b64 s16, s[2:3]
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[8:9], s[12:13]
-; GCN-IR-NEXT: s_sub_u32 s12, s10, s16
-; GCN-IR-NEXT: s_subb_u32 s13, 0, 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[12:13], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[12:13], 63
-; GCN-IR-NEXT: s_or_b64 s[14:15], s[8:9], s[14:15]
-; GCN-IR-NEXT: s_and_b64 s[8:9], s[14:15], exec
-; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3
-; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
+; GCN-IR-NEXT: s_or_b64 s[12:13], s[8:9], s[12:13]
+; GCN-IR-NEXT: s_sub_u32 s8, s10, s16
+; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[14:15], s[8:9], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[18:19], s[8:9], 63
+; GCN-IR-NEXT: s_or_b64 s[14:15], s[12:13], s[14:15]
+; GCN-IR-NEXT: s_and_b64 s[12:13], s[14:15], exec
+; GCN-IR-NEXT: s_cselect_b32 s13, 0, s3
+; GCN-IR-NEXT: s_cselect_b32 s12, 0, s2
; GCN-IR-NEXT: s_or_b64 s[14:15], s[14:15], s[18:19]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s14, s12, 1
-; GCN-IR-NEXT: s_addc_u32 s15, s13, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[14:15], 0
-; GCN-IR-NEXT: s_sub_i32 s12, 63, s12
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s12
+; GCN-IR-NEXT: s_add_u32 s12, s8, 1
+; GCN-IR-NEXT: s_addc_u32 s13, s9, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[12:13], 0
+; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[14:15]
+; GCN-IR-NEXT: s_lshl_b64 s[8:9], s[2:3], s8
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s14
+; GCN-IR-NEXT: s_lshr_b64 s[12:13], s[2:3], s12
; GCN-IR-NEXT: s_add_u32 s14, s6, -1
; GCN-IR-NEXT: s_addc_u32 s15, s7, -1
; GCN-IR-NEXT: s_not_b64 s[2:3], s[10:11]
@@ -184,12 +184,12 @@ define amdgpu_kernel void @s_test_udiv_i64(ptr addrspace(1) %out, i64 %x, i64 %y
; GCN-IR-NEXT: s_cbranch_vccz .LBB0_3
; GCN-IR-NEXT: .LBB0_4: ; %Flow7
; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[8:9], 1
-; GCN-IR-NEXT: s_or_b64 s[8:9], s[4:5], s[2:3]
+; GCN-IR-NEXT: s_or_b64 s[12:13], s[4:5], s[2:3]
; GCN-IR-NEXT: .LBB0_5: ; %udiv-end
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s8
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s12
; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
; GCN-IR-NEXT: s_mov_b32 s2, -1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s9
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s13
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-IR-NEXT: s_endpgm
%result = udiv i64 %x, %y
@@ -895,28 +895,28 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_add_u32 s8, s12, 0xffffffc5
-; GCN-IR-NEXT: s_addc_u32 s9, 0, -1
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[2:3], 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[8:9], 63
-; GCN-IR-NEXT: s_or_b64 s[10:11], s[6:7], s[10:11]
-; GCN-IR-NEXT: s_and_b64 s[6:7], s[10:11], exec
-; GCN-IR-NEXT: s_cselect_b32 s6, 0, 24
+; GCN-IR-NEXT: s_add_u32 s6, s12, 0xffffffc5
+; GCN-IR-NEXT: s_addc_u32 s7, 0, -1
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[8:9], s[2:3], 0
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[10:11], s[6:7], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[14:15], s[6:7], 63
+; GCN-IR-NEXT: s_or_b64 s[10:11], s[8:9], s[10:11]
+; GCN-IR-NEXT: s_and_b64 s[8:9], s[10:11], exec
+; GCN-IR-NEXT: s_cselect_b32 s8, 0, 24
; GCN-IR-NEXT: s_or_b64 s[10:11], s[10:11], s[14:15]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
-; GCN-IR-NEXT: s_mov_b32 s7, 0
+; GCN-IR-NEXT: s_mov_b32 s9, 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
-; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s8
+; GCN-IR-NEXT: s_add_u32 s8, s6, 1
+; GCN-IR-NEXT: s_addc_u32 s9, s7, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 0
+; GCN-IR-NEXT: s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[10:11]
+; GCN-IR-NEXT: s_lshl_b64 s[6:7], 24, s6
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s10
+; GCN-IR-NEXT: s_lshr_b64 s[10:11], 24, s8
; GCN-IR-NEXT: s_add_u32 s14, s2, -1
; GCN-IR-NEXT: s_addc_u32 s15, s3, -1
; GCN-IR-NEXT: s_sub_u32 s8, 58, s12
@@ -946,12 +946,12 @@ define amdgpu_kernel void @s_test_udiv_k_num_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_cbranch_vccz .LBB8_3
; GCN-IR-NEXT: .LBB8_4: ; %Flow6
; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[6:7], 1
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[4:5], s[2:3]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[4:5], s[2:3]
; GCN-IR-NEXT: .LBB8_5: ; %udiv-end
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s8
; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
; GCN-IR-NEXT: s_mov_b32 s2, -1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s7
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s9
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-IR-NEXT: s_endpgm
%result = udiv i64 24, %x
@@ -1261,31 +1261,31 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR: ; %bb.0: ; %_udiv-special-cases
; GCN-IR-NEXT: s_load_dwordx4 s[0:3], s[4:5], 0x9
; GCN-IR-NEXT: s_waitcnt lgkmcnt(0)
-; GCN-IR-NEXT: s_flbit_i32_b64 s12, s[2:3]
-; GCN-IR-NEXT: s_sub_u32 s8, 59, s12
-; GCN-IR-NEXT: s_subb_u32 s9, 0, 0
+; GCN-IR-NEXT: s_flbit_i32_b64 s10, s[2:3]
+; GCN-IR-NEXT: s_sub_u32 s6, 59, s10
+; GCN-IR-NEXT: s_subb_u32 s7, 0, 0
; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[4:5], s[2:3], 0
-; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[6:7], s[8:9], 63
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[10:11], s[8:9], 63
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[6:7]
-; GCN-IR-NEXT: s_and_b64 s[6:7], s[4:5], exec
-; GCN-IR-NEXT: s_cselect_b32 s7, 0, s3
-; GCN-IR-NEXT: s_cselect_b32 s6, 0, s2
-; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[10:11]
+; GCN-IR-NEXT: v_cmp_gt_u64_e64 s[8:9], s[6:7], 63
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[6:7], 63
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[8:9]
+; GCN-IR-NEXT: s_and_b64 s[8:9], s[4:5], exec
+; GCN-IR-NEXT: s_cselect_b32 s9, 0, s3
+; GCN-IR-NEXT: s_cselect_b32 s8, 0, s2
+; GCN-IR-NEXT: s_or_b64 s[4:5], s[4:5], s[12:13]
; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[4:5]
; GCN-IR-NEXT: s_mov_b64 s[4:5], 0
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_5
; GCN-IR-NEXT: ; %bb.1: ; %udiv-bb1
-; GCN-IR-NEXT: s_add_u32 s10, s8, 1
-; GCN-IR-NEXT: s_addc_u32 s11, s9, 0
-; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[6:7], s[10:11], 0
-; GCN-IR-NEXT: s_sub_i32 s8, 63, s8
-; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[6:7]
-; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s8
+; GCN-IR-NEXT: s_add_u32 s8, s6, 1
+; GCN-IR-NEXT: s_addc_u32 s9, s7, 0
+; GCN-IR-NEXT: v_cmp_eq_u64_e64 s[12:13], s[8:9], 0
+; GCN-IR-NEXT: s_sub_i32 s6, 63, s6
+; GCN-IR-NEXT: s_andn2_b64 vcc, exec, s[12:13]
+; GCN-IR-NEXT: s_lshl_b64 s[6:7], s[2:3], s6
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_4
; GCN-IR-NEXT: ; %bb.2: ; %udiv-preheader
-; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s10
-; GCN-IR-NEXT: s_add_u32 s2, s12, 0xffffffc4
+; GCN-IR-NEXT: s_lshr_b64 s[8:9], s[2:3], s8
+; GCN-IR-NEXT: s_add_u32 s2, s10, 0xffffffc4
; GCN-IR-NEXT: s_addc_u32 s3, 0, -1
; GCN-IR-NEXT: s_mov_b64 s[10:11], 0
; GCN-IR-NEXT: s_mov_b32 s5, 0
@@ -1311,12 +1311,12 @@ define amdgpu_kernel void @s_test_udiv_k_den_i64(ptr addrspace(1) %out, i64 %x)
; GCN-IR-NEXT: s_cbranch_vccz .LBB11_3
; GCN-IR-NEXT: .LBB11_4: ; %Flow6
; GCN-IR-NEXT: s_lshl_b64 s[2:3], s[6:7], 1
-; GCN-IR-NEXT: s_or_b64 s[6:7], s[4:5], s[2:3]
+; GCN-IR-NEXT: s_or_b64 s[8:9], s[4:5], s[2:3]
; GCN-IR-NEXT: .LBB11_5: ; %udiv-end
-; GCN-IR-NEXT: v_mov_b32_e32 v0, s6
+; GCN-IR-NEXT: v_mov_b32_e32 v0, s8
; GCN-IR-NEXT: s_mov_b32 s3, 0xf000
; GCN-IR-NEXT: s_mov_b32 s2, -1
-; GCN-IR-NEXT: v_mov_b32_e32 v1, s7
+; GCN-IR-NEXT: v_mov_b32_e32 v1, s9
; GCN-IR-NEXT: buffer_store_dwordx2 v[0:1], off, s[0:3], 0
; GCN-IR-NEXT: s_endpgm
%result = udiv i64 %x, 24
diff --git a/llvm/test/CodeGen/AMDGPU/wave32.ll b/llvm/test/CodeGen/AMDGPU/wave32.ll
index 097154e..3cba1bf 100644
--- a/llvm/test/CodeGen/AMDGPU/wave32.ll
+++ b/llvm/test/CodeGen/AMDGPU/wave32.ll
@@ -734,15 +734,16 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
; GFX1032-NEXT: s_load_dwordx4 s[4:7], s[2:3], 0x0
; GFX1032-NEXT: s_waitcnt lgkmcnt(0)
-; GFX1032-NEXT: s_or_b64 s[8:9], s[6:7], s[4:5]
-; GFX1032-NEXT: s_mov_b32 s8, 0
-; GFX1032-NEXT: s_cmp_lg_u64 s[8:9], 0
+; GFX1032-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5]
+; GFX1032-NEXT: s_mov_b32 s0, 0
+; GFX1032-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1032-NEXT: s_mov_b32 s0, -1
; GFX1032-NEXT: s_cbranch_scc0 .LBB15_4
; GFX1032-NEXT: ; %bb.1:
; GFX1032-NEXT: v_cvt_f32_u32_e32 v0, s4
; GFX1032-NEXT: v_cvt_f32_u32_e32 v1, s5
-; GFX1032-NEXT: s_sub_u32 s9, 0, s4
-; GFX1032-NEXT: s_subb_u32 s10, 0, s5
+; GFX1032-NEXT: s_sub_u32 s8, 0, s4
+; GFX1032-NEXT: s_subb_u32 s9, 0, s5
; GFX1032-NEXT: v_madmk_f32 v0, v1, 0x4f800000, v0
; GFX1032-NEXT: v_rcp_f32_e32 v0, v0
; GFX1032-NEXT: v_mul_f32_e32 v0, 0x5f7ffffc, v0
@@ -753,111 +754,110 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: v_cvt_u32_f32_e32 v0, v0
; GFX1032-NEXT: v_readfirstlane_b32 s0, v1
; GFX1032-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT: s_mul_i32 s11, s9, s0
-; GFX1032-NEXT: s_mul_hi_u32 s13, s9, s1
-; GFX1032-NEXT: s_mul_i32 s12, s10, s1
-; GFX1032-NEXT: s_add_i32 s11, s13, s11
-; GFX1032-NEXT: s_mul_i32 s14, s9, s1
-; GFX1032-NEXT: s_add_i32 s11, s11, s12
-; GFX1032-NEXT: s_mul_hi_u32 s13, s1, s14
-; GFX1032-NEXT: s_mul_hi_u32 s15, s0, s14
-; GFX1032-NEXT: s_mul_i32 s12, s0, s14
-; GFX1032-NEXT: s_mul_hi_u32 s14, s1, s11
-; GFX1032-NEXT: s_mul_i32 s1, s1, s11
-; GFX1032-NEXT: s_mul_hi_u32 s16, s0, s11
-; GFX1032-NEXT: s_add_u32 s1, s13, s1
-; GFX1032-NEXT: s_addc_u32 s13, 0, s14
-; GFX1032-NEXT: s_add_u32 s1, s1, s12
-; GFX1032-NEXT: s_mul_i32 s11, s0, s11
-; GFX1032-NEXT: s_addc_u32 s1, s13, s15
-; GFX1032-NEXT: s_addc_u32 s12, s16, 0
+; GFX1032-NEXT: s_mul_i32 s10, s8, s0
+; GFX1032-NEXT: s_mul_hi_u32 s12, s8, s1
+; GFX1032-NEXT: s_mul_i32 s11, s9, s1
+; GFX1032-NEXT: s_add_i32 s10, s12, s10
+; GFX1032-NEXT: s_mul_i32 s13, s8, s1
+; GFX1032-NEXT: s_add_i32 s10, s10, s11
+; GFX1032-NEXT: s_mul_hi_u32 s12, s1, s13
+; GFX1032-NEXT: s_mul_hi_u32 s14, s0, s13
+; GFX1032-NEXT: s_mul_i32 s11, s0, s13
+; GFX1032-NEXT: s_mul_hi_u32 s13, s1, s10
+; GFX1032-NEXT: s_mul_i32 s1, s1, s10
+; GFX1032-NEXT: s_mul_hi_u32 s15, s0, s10
+; GFX1032-NEXT: s_add_u32 s1, s12, s1
+; GFX1032-NEXT: s_addc_u32 s12, 0, s13
; GFX1032-NEXT: s_add_u32 s1, s1, s11
-; GFX1032-NEXT: s_addc_u32 s11, 0, s12
+; GFX1032-NEXT: s_mul_i32 s10, s0, s10
+; GFX1032-NEXT: s_addc_u32 s1, s12, s14
+; GFX1032-NEXT: s_addc_u32 s11, s15, 0
+; GFX1032-NEXT: s_add_u32 s1, s1, s10
+; GFX1032-NEXT: s_addc_u32 s10, 0, s11
; GFX1032-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX1032-NEXT: s_cmp_lg_u32 s1, 0
-; GFX1032-NEXT: s_addc_u32 s0, s0, s11
+; GFX1032-NEXT: s_addc_u32 s0, s0, s10
; GFX1032-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT: s_mul_i32 s11, s9, s0
-; GFX1032-NEXT: s_mul_hi_u32 s12, s9, s1
-; GFX1032-NEXT: s_mul_i32 s10, s10, s1
-; GFX1032-NEXT: s_add_i32 s11, s12, s11
+; GFX1032-NEXT: s_mul_i32 s10, s8, s0
+; GFX1032-NEXT: s_mul_hi_u32 s11, s8, s1
; GFX1032-NEXT: s_mul_i32 s9, s9, s1
-; GFX1032-NEXT: s_add_i32 s11, s11, s10
-; GFX1032-NEXT: s_mul_hi_u32 s12, s0, s9
-; GFX1032-NEXT: s_mul_i32 s13, s0, s9
-; GFX1032-NEXT: s_mul_hi_u32 s9, s1, s9
-; GFX1032-NEXT: s_mul_hi_u32 s14, s1, s11
-; GFX1032-NEXT: s_mul_i32 s1, s1, s11
-; GFX1032-NEXT: s_mul_hi_u32 s10, s0, s11
-; GFX1032-NEXT: s_add_u32 s1, s9, s1
-; GFX1032-NEXT: s_addc_u32 s9, 0, s14
-; GFX1032-NEXT: s_add_u32 s1, s1, s13
-; GFX1032-NEXT: s_mul_i32 s11, s0, s11
-; GFX1032-NEXT: s_addc_u32 s1, s9, s12
-; GFX1032-NEXT: s_addc_u32 s9, s10, 0
-; GFX1032-NEXT: s_add_u32 s1, s1, s11
-; GFX1032-NEXT: s_addc_u32 s9, 0, s9
+; GFX1032-NEXT: s_add_i32 s10, s11, s10
+; GFX1032-NEXT: s_mul_i32 s8, s8, s1
+; GFX1032-NEXT: s_add_i32 s10, s10, s9
+; GFX1032-NEXT: s_mul_hi_u32 s11, s0, s8
+; GFX1032-NEXT: s_mul_i32 s12, s0, s8
+; GFX1032-NEXT: s_mul_hi_u32 s8, s1, s8
+; GFX1032-NEXT: s_mul_hi_u32 s13, s1, s10
+; GFX1032-NEXT: s_mul_i32 s1, s1, s10
+; GFX1032-NEXT: s_mul_hi_u32 s9, s0, s10
+; GFX1032-NEXT: s_add_u32 s1, s8, s1
+; GFX1032-NEXT: s_addc_u32 s8, 0, s13
+; GFX1032-NEXT: s_add_u32 s1, s1, s12
+; GFX1032-NEXT: s_mul_i32 s10, s0, s10
+; GFX1032-NEXT: s_addc_u32 s1, s8, s11
+; GFX1032-NEXT: s_addc_u32 s8, s9, 0
+; GFX1032-NEXT: s_add_u32 s1, s1, s10
+; GFX1032-NEXT: s_addc_u32 s8, 0, s8
; GFX1032-NEXT: v_add_co_u32 v0, s1, v0, s1
; GFX1032-NEXT: s_cmp_lg_u32 s1, 0
-; GFX1032-NEXT: s_addc_u32 s0, s0, s9
+; GFX1032-NEXT: s_addc_u32 s0, s0, s8
; GFX1032-NEXT: v_readfirstlane_b32 s1, v0
-; GFX1032-NEXT: s_mul_i32 s10, s6, s0
-; GFX1032-NEXT: s_mul_hi_u32 s9, s6, s0
-; GFX1032-NEXT: s_mul_hi_u32 s11, s7, s0
+; GFX1032-NEXT: s_mul_i32 s9, s6, s0
+; GFX1032-NEXT: s_mul_hi_u32 s8, s6, s0
+; GFX1032-NEXT: s_mul_hi_u32 s10, s7, s0
; GFX1032-NEXT: s_mul_i32 s0, s7, s0
-; GFX1032-NEXT: s_mul_hi_u32 s12, s6, s1
-; GFX1032-NEXT: s_mul_hi_u32 s13, s7, s1
+; GFX1032-NEXT: s_mul_hi_u32 s11, s6, s1
+; GFX1032-NEXT: s_mul_hi_u32 s12, s7, s1
; GFX1032-NEXT: s_mul_i32 s1, s7, s1
-; GFX1032-NEXT: s_add_u32 s10, s12, s10
-; GFX1032-NEXT: s_addc_u32 s9, 0, s9
-; GFX1032-NEXT: s_add_u32 s1, s10, s1
-; GFX1032-NEXT: s_addc_u32 s1, s9, s13
-; GFX1032-NEXT: s_addc_u32 s9, s11, 0
+; GFX1032-NEXT: s_add_u32 s9, s11, s9
+; GFX1032-NEXT: s_addc_u32 s8, 0, s8
+; GFX1032-NEXT: s_add_u32 s1, s9, s1
+; GFX1032-NEXT: s_addc_u32 s1, s8, s12
+; GFX1032-NEXT: s_addc_u32 s8, s10, 0
; GFX1032-NEXT: s_add_u32 s1, s1, s0
-; GFX1032-NEXT: s_addc_u32 s9, 0, s9
+; GFX1032-NEXT: s_addc_u32 s8, 0, s8
; GFX1032-NEXT: s_mul_hi_u32 s0, s4, s1
-; GFX1032-NEXT: s_mul_i32 s11, s4, s9
-; GFX1032-NEXT: s_mul_i32 s12, s4, s1
-; GFX1032-NEXT: s_add_i32 s0, s0, s11
-; GFX1032-NEXT: v_sub_co_u32 v0, s11, s6, s12
-; GFX1032-NEXT: s_mul_i32 s10, s5, s1
+; GFX1032-NEXT: s_mul_i32 s10, s4, s8
+; GFX1032-NEXT: s_mul_i32 s11, s4, s1
; GFX1032-NEXT: s_add_i32 s0, s0, s10
-; GFX1032-NEXT: v_sub_co_u32 v1, s12, v0, s4
-; GFX1032-NEXT: s_sub_i32 s10, s7, s0
+; GFX1032-NEXT: v_sub_co_u32 v0, s10, s6, s11
+; GFX1032-NEXT: s_mul_i32 s9, s5, s1
+; GFX1032-NEXT: s_add_i32 s0, s0, s9
+; GFX1032-NEXT: v_sub_co_u32 v1, s11, v0, s4
+; GFX1032-NEXT: s_sub_i32 s9, s7, s0
+; GFX1032-NEXT: s_cmp_lg_u32 s10, 0
+; GFX1032-NEXT: s_subb_u32 s9, s9, s5
; GFX1032-NEXT: s_cmp_lg_u32 s11, 0
-; GFX1032-NEXT: s_subb_u32 s10, s10, s5
-; GFX1032-NEXT: s_cmp_lg_u32 s12, 0
; GFX1032-NEXT: v_cmp_le_u32_e32 vcc_lo, s4, v1
-; GFX1032-NEXT: s_subb_u32 s10, s10, 0
-; GFX1032-NEXT: s_cmp_ge_u32 s10, s5
+; GFX1032-NEXT: s_subb_u32 s9, s9, 0
+; GFX1032-NEXT: s_cmp_ge_u32 s9, s5
; GFX1032-NEXT: v_cndmask_b32_e64 v1, 0, -1, vcc_lo
-; GFX1032-NEXT: s_cselect_b32 s12, -1, 0
-; GFX1032-NEXT: s_cmp_eq_u32 s10, s5
+; GFX1032-NEXT: s_cselect_b32 s11, -1, 0
+; GFX1032-NEXT: s_cmp_eq_u32 s9, s5
; GFX1032-NEXT: s_cselect_b32 vcc_lo, -1, 0
-; GFX1032-NEXT: s_add_u32 s10, s1, 1
-; GFX1032-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
-; GFX1032-NEXT: s_addc_u32 s12, s9, 0
-; GFX1032-NEXT: s_add_u32 s13, s1, 2
-; GFX1032-NEXT: s_addc_u32 s14, s9, 0
-; GFX1032-NEXT: s_cmp_lg_u32 s11, 0
+; GFX1032-NEXT: s_add_u32 s9, s1, 1
+; GFX1032-NEXT: v_cndmask_b32_e32 v1, s11, v1, vcc_lo
+; GFX1032-NEXT: s_addc_u32 s11, s8, 0
+; GFX1032-NEXT: s_add_u32 s12, s1, 2
+; GFX1032-NEXT: s_addc_u32 s13, s8, 0
+; GFX1032-NEXT: s_cmp_lg_u32 s10, 0
; GFX1032-NEXT: v_cmp_le_u32_e32 vcc_lo, s4, v0
; GFX1032-NEXT: s_subb_u32 s0, s7, s0
-; GFX1032-NEXT: v_mov_b32_e32 v2, s13
+; GFX1032-NEXT: v_mov_b32_e32 v2, s12
; GFX1032-NEXT: s_cmp_ge_u32 s0, s5
; GFX1032-NEXT: v_cndmask_b32_e64 v0, 0, -1, vcc_lo
; GFX1032-NEXT: s_cselect_b32 s7, -1, 0
; GFX1032-NEXT: s_cmp_eq_u32 s0, s5
; GFX1032-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v1
; GFX1032-NEXT: s_cselect_b32 s0, -1, 0
-; GFX1032-NEXT: v_mov_b32_e32 v1, s14
+; GFX1032-NEXT: v_mov_b32_e32 v1, s13
; GFX1032-NEXT: v_cndmask_b32_e64 v0, s7, v0, s0
-; GFX1032-NEXT: v_cndmask_b32_e32 v2, s10, v2, vcc_lo
-; GFX1032-NEXT: v_cndmask_b32_e32 v1, s12, v1, vcc_lo
+; GFX1032-NEXT: v_cndmask_b32_e32 v2, s9, v2, vcc_lo
+; GFX1032-NEXT: v_cndmask_b32_e32 v1, s11, v1, vcc_lo
; GFX1032-NEXT: v_cmp_ne_u32_e32 vcc_lo, 0, v0
-; GFX1032-NEXT: v_cndmask_b32_e32 v1, s9, v1, vcc_lo
+; GFX1032-NEXT: v_cndmask_b32_e32 v1, s8, v1, vcc_lo
; GFX1032-NEXT: v_cndmask_b32_e32 v0, s1, v2, vcc_lo
-; GFX1032-NEXT: s_andn2_b32 vcc_lo, exec_lo, s8
-; GFX1032-NEXT: s_cbranch_vccnz .LBB15_3
+; GFX1032-NEXT: s_cbranch_execnz .LBB15_3
; GFX1032-NEXT: .LBB15_2:
; GFX1032-NEXT: v_cvt_f32_u32_e32 v0, s4
; GFX1032-NEXT: s_sub_i32 s1, 0, s4
@@ -888,7 +888,9 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1032-NEXT: s_endpgm
; GFX1032-NEXT: .LBB15_4:
; GFX1032-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1032-NEXT: s_branch .LBB15_2
+; GFX1032-NEXT: s_andn2_b32 vcc_lo, exec_lo, s0
+; GFX1032-NEXT: s_cbranch_vccz .LBB15_2
+; GFX1032-NEXT: s_branch .LBB15_3
;
; GFX1064-LABEL: test_udiv64:
; GFX1064: ; %bb.0: ; %bb
@@ -899,6 +901,7 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1064-NEXT: s_or_b64 s[0:1], s[6:7], s[4:5]
; GFX1064-NEXT: s_mov_b32 s0, 0
; GFX1064-NEXT: s_cmp_lg_u64 s[0:1], 0
+; GFX1064-NEXT: s_mov_b64 s[0:1], -1
; GFX1064-NEXT: s_cbranch_scc0 .LBB15_4
; GFX1064-NEXT: ; %bb.1:
; GFX1064-NEXT: v_cvt_f32_u32_e32 v0, s4
@@ -1049,7 +1052,9 @@ define amdgpu_kernel void @test_udiv64(ptr addrspace(1) %arg) #0 {
; GFX1064-NEXT: s_endpgm
; GFX1064-NEXT: .LBB15_4:
; GFX1064-NEXT: ; implicit-def: $vgpr0_vgpr1
-; GFX1064-NEXT: s_branch .LBB15_2
+; GFX1064-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GFX1064-NEXT: s_cbranch_vccz .LBB15_2
+; GFX1064-NEXT: s_branch .LBB15_3
bb:
%tmp = getelementptr inbounds i64, ptr addrspace(1) %arg, i64 1
%tmp1 = load i64, ptr addrspace(1) %tmp, align 8
@@ -1824,10 +1829,13 @@ define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 {
; GFX1032-NEXT: v_mov_b32_e32 v1, v5
; GFX1032-NEXT: v_mov_b32_e32 v2, v6
; GFX1032-NEXT: v_mov_b32_e32 v3, v7
+; GFX1032-NEXT: s_mov_b32 s1, -1
; GFX1032-NEXT: s_cbranch_vccz .LBB33_1
-; GFX1032-NEXT: ; %bb.3:
+; GFX1032-NEXT: ; %bb.3: ; in Loop: Header=BB33_2 Depth=1
; GFX1032-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7
; GFX1032-NEXT: ; implicit-def: $vgpr8
+; GFX1032-NEXT: s_andn2_b32 vcc_lo, exec_lo, s1
+; GFX1032-NEXT: s_cbranch_vccnz .LBB33_2
; GFX1032-NEXT: .LBB33_4: ; %break
; GFX1032-NEXT: s_and_b32 exec_lo, exec_lo, s0
; GFX1032-NEXT: s_waitcnt vmcnt(0)
@@ -1856,10 +1864,13 @@ define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) #0 {
; GFX1064-NEXT: v_mov_b32_e32 v1, v5
; GFX1064-NEXT: v_mov_b32_e32 v2, v6
; GFX1064-NEXT: v_mov_b32_e32 v3, v7
+; GFX1064-NEXT: s_mov_b64 s[2:3], -1
; GFX1064-NEXT: s_cbranch_vccz .LBB33_1
-; GFX1064-NEXT: ; %bb.3:
+; GFX1064-NEXT: ; %bb.3: ; in Loop: Header=BB33_2 Depth=1
; GFX1064-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7
; GFX1064-NEXT: ; implicit-def: $vgpr8
+; GFX1064-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GFX1064-NEXT: s_cbranch_vccnz .LBB33_2
; GFX1064-NEXT: .LBB33_4: ; %break
; GFX1064-NEXT: s_and_b64 exec, exec, s[0:1]
; GFX1064-NEXT: s_waitcnt vmcnt(0)
diff --git a/llvm/test/CodeGen/AMDGPU/wqm.ll b/llvm/test/CodeGen/AMDGPU/wqm.ll
index ad8dcd3..db1f4e0 100644
--- a/llvm/test/CodeGen/AMDGPU/wqm.ll
+++ b/llvm/test/CodeGen/AMDGPU/wqm.ll
@@ -1949,10 +1949,13 @@ define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) nounwind {
; GFX9-W64-NEXT: v_mov_b32_e32 v1, v5
; GFX9-W64-NEXT: v_mov_b32_e32 v2, v6
; GFX9-W64-NEXT: v_mov_b32_e32 v3, v7
+; GFX9-W64-NEXT: s_mov_b64 s[2:3], -1
; GFX9-W64-NEXT: s_cbranch_vccz .LBB35_1
-; GFX9-W64-NEXT: ; %bb.3:
+; GFX9-W64-NEXT: ; %bb.3: ; in Loop: Header=BB35_2 Depth=1
; GFX9-W64-NEXT: ; implicit-def: $vgpr4_vgpr5_vgpr6_vgpr7
; GFX9-W64-NEXT: ; implicit-def: $vgpr8
+; GFX9-W64-NEXT: s_andn2_b64 vcc, exec, s[2:3]
+; GFX9-W64-NEXT: s_cbranch_vccnz .LBB35_2
; GFX9-W64-NEXT: .LBB35_4: ; %break
; GFX9-W64-NEXT: s_and_b64 exec, exec, s[0:1]
; GFX9-W64-NEXT: s_waitcnt vmcnt(0)
@@ -1981,10 +1984,13 @@ define amdgpu_ps <4 x float> @test_loop_vcc(<4 x float> %in) nounwind {
; GFX10-W32-NEXT: v_mov_b32_e32 v6, v2
; GFX10-W32-NEXT: v_mov_b32_e32 v5, v1
; GFX10-W32-NEXT: v_mov_b32_e32 v4, v0
+; GFX10-W32-NEXT: s_mov_b32 s1, -1
; GFX10-W32-NEXT: s_cbranch_vccz .LBB35_1
-; GFX10-W32-NEXT: ; %bb.3:
+; GFX10-W32-NEXT: ; %bb.3: ; in Loop: Header=BB35_2 Depth=1
; GFX10-W32-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
; GFX10-W32-NEXT: ; implicit-def: $vgpr8
+; GFX10-W32-NEXT: s_andn2_b32 vcc_lo, exec_lo, s1
+; GFX10-W32-NEXT: s_cbranch_vccnz .LBB35_2
; GFX10-W32-NEXT: .LBB35_4: ; %break
; GFX10-W32-NEXT: s_and_b32 exec_lo, exec_lo, s0
; GFX10-W32-NEXT: s_waitcnt vmcnt(0)
@@ -2177,6 +2183,7 @@ define amdgpu_ps <4 x float> @test_scc(i32 inreg %sel, i32 %idx) #1 {
; GFX9-W64-NEXT: s_wqm_b64 exec, exec
; GFX9-W64-NEXT: v_mov_b32_e32 v4, v0
; GFX9-W64-NEXT: s_cmp_lt_i32 s0, 1
+; GFX9-W64-NEXT: s_mov_b64 s[0:1], -1
; GFX9-W64-NEXT: s_cbranch_scc0 .LBB39_2
; GFX9-W64-NEXT: ; %bb.1: ; %else
; GFX9-W64-NEXT: v_mov_b32_e32 v0, 0
@@ -2186,6 +2193,8 @@ define amdgpu_ps <4 x float> @test_scc(i32 inreg %sel, i32 %idx) #1 {
; GFX9-W64-NEXT: s_branch .LBB39_4
; GFX9-W64-NEXT: .LBB39_2:
; GFX9-W64-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GFX9-W64-NEXT: s_andn2_b64 vcc, exec, s[0:1]
+; GFX9-W64-NEXT: s_cbranch_vccnz .LBB39_4
; GFX9-W64-NEXT: .LBB39_3: ; %if
; GFX9-W64-NEXT: s_waitcnt vmcnt(0)
; GFX9-W64-NEXT: v_mov_b32_e32 v0, 0
@@ -2203,6 +2212,7 @@ define amdgpu_ps <4 x float> @test_scc(i32 inreg %sel, i32 %idx) #1 {
; GFX10-W32-NEXT: s_wqm_b32 exec_lo, exec_lo
; GFX10-W32-NEXT: v_mov_b32_e32 v4, v0
; GFX10-W32-NEXT: s_cmp_lt_i32 s0, 1
+; GFX10-W32-NEXT: s_mov_b32 s0, -1
; GFX10-W32-NEXT: s_cbranch_scc0 .LBB39_2
; GFX10-W32-NEXT: ; %bb.1: ; %else
; GFX10-W32-NEXT: v_mov_b32_e32 v1, 1
@@ -2212,6 +2222,8 @@ define amdgpu_ps <4 x float> @test_scc(i32 inreg %sel, i32 %idx) #1 {
; GFX10-W32-NEXT: s_branch .LBB39_4
; GFX10-W32-NEXT: .LBB39_2:
; GFX10-W32-NEXT: ; implicit-def: $vgpr0_vgpr1_vgpr2_vgpr3
+; GFX10-W32-NEXT: s_andn2_b32 vcc_lo, exec_lo, s0
+; GFX10-W32-NEXT: s_cbranch_vccnz .LBB39_4
; GFX10-W32-NEXT: .LBB39_3: ; %if
; GFX10-W32-NEXT: s_waitcnt vmcnt(0)
; GFX10-W32-NEXT: v_mov_b32_e32 v0, 0
diff --git a/llvm/test/CodeGen/AMDGPU/xor.ll b/llvm/test/CodeGen/AMDGPU/xor.ll
index 00bb7b2..ac96cf7 100644
--- a/llvm/test/CodeGen/AMDGPU/xor.ll
+++ b/llvm/test/CodeGen/AMDGPU/xor.ll
@@ -515,19 +515,18 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; SI-LABEL: xor_cf:
; SI: ; %bb.0: ; %entry
; SI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x9
-; SI-NEXT: s_mov_b64 s[8:9], 0
; SI-NEXT: s_waitcnt lgkmcnt(0)
-; SI-NEXT: v_cmp_ne_u64_e64 s[10:11], s[4:5], 0
-; SI-NEXT: s_and_b64 vcc, exec, s[10:11]
+; SI-NEXT: v_cmp_ne_u64_e64 s[8:9], s[4:5], 0
+; SI-NEXT: s_and_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_mov_b64 s[8:9], -1
; SI-NEXT: s_cbranch_vccz .LBB12_4
; SI-NEXT: ; %bb.1: ; %else
-; SI-NEXT: s_mov_b32 s15, 0xf000
-; SI-NEXT: s_mov_b32 s14, -1
-; SI-NEXT: s_mov_b32 s12, s2
-; SI-NEXT: s_mov_b32 s13, s3
-; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[12:15], 0
-; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; SI-NEXT: s_cbranch_vccnz .LBB12_3
+; SI-NEXT: s_mov_b32 s11, 0xf000
+; SI-NEXT: s_mov_b32 s10, -1
+; SI-NEXT: s_mov_b32 s8, s2
+; SI-NEXT: s_mov_b32 s9, s3
+; SI-NEXT: buffer_load_dwordx2 v[0:1], off, s[8:11], 0
+; SI-NEXT: s_cbranch_execnz .LBB12_3
; SI-NEXT: .LBB12_2: ; %if
; SI-NEXT: s_xor_b64 s[2:3], s[4:5], s[6:7]
; SI-NEXT: s_waitcnt vmcnt(0)
@@ -541,12 +540,14 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; SI-NEXT: s_endpgm
; SI-NEXT: .LBB12_4:
; SI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; SI-NEXT: s_branch .LBB12_2
+; SI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; SI-NEXT: s_cbranch_vccz .LBB12_2
+; SI-NEXT: s_branch .LBB12_3
;
; VI-LABEL: xor_cf:
; VI: ; %bb.0: ; %entry
; VI-NEXT: s_load_dwordx8 s[0:7], s[4:5], 0x24
-; VI-NEXT: s_mov_b64 s[8:9], 0
+; VI-NEXT: s_mov_b64 s[8:9], -1
; VI-NEXT: s_waitcnt lgkmcnt(0)
; VI-NEXT: s_cmp_lg_u64 s[4:5], 0
; VI-NEXT: s_cbranch_scc0 .LBB12_4
@@ -554,8 +555,7 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; VI-NEXT: v_mov_b32_e32 v0, s2
; VI-NEXT: v_mov_b32_e32 v1, s3
; VI-NEXT: flat_load_dwordx2 v[0:1], v[0:1]
-; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
-; VI-NEXT: s_cbranch_vccnz .LBB12_3
+; VI-NEXT: s_cbranch_execnz .LBB12_3
; VI-NEXT: .LBB12_2: ; %if
; VI-NEXT: s_xor_b64 s[2:3], s[4:5], s[6:7]
; VI-NEXT: s_waitcnt vmcnt(0)
@@ -569,7 +569,9 @@ define amdgpu_kernel void @xor_cf(ptr addrspace(1) %out, ptr addrspace(1) %in, i
; VI-NEXT: s_endpgm
; VI-NEXT: .LBB12_4:
; VI-NEXT: ; implicit-def: $vgpr0_vgpr1
-; VI-NEXT: s_branch .LBB12_2
+; VI-NEXT: s_andn2_b64 vcc, exec, s[8:9]
+; VI-NEXT: s_cbranch_vccz .LBB12_2
+; VI-NEXT: s_branch .LBB12_3
entry:
%0 = icmp eq i64 %a, 0
br i1 %0, label %if, label %else
diff --git a/llvm/test/CodeGen/PowerPC/atomic-compare-exchange-weak.ll b/llvm/test/CodeGen/PowerPC/atomic-compare-exchange-weak.ll
index 65a12a6..4df62ae 100644
--- a/llvm/test/CodeGen/PowerPC/atomic-compare-exchange-weak.ll
+++ b/llvm/test/CodeGen/PowerPC/atomic-compare-exchange-weak.ll
@@ -22,20 +22,20 @@ define i32 @foo(ptr noundef %cp, ptr noundef %old, i32 noundef %c) {
; CHECK-NEXT: bne cr0, L..BB0_2
; CHECK-NEXT: # %bb.1: # %cmpxchg.fencedstore
; CHECK-NEXT: stwcx. r5, 0, r3
-; CHECK-NEXT: beq cr0, L..BB0_5
+; CHECK-NEXT: creqv 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
+; CHECK-NEXT: beq cr0, L..BB0_3
; CHECK-NEXT: L..BB0_2: # %cmpxchg.failure
; CHECK-NEXT: crxor 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
-; CHECK-NEXT: # %bb.3: # %cmpxchg.store_expected
+; CHECK-NEXT: L..BB0_3: # %cmpxchg.end
+; CHECK-NEXT: bc 12, 4*cr5+lt, L..BB0_5
+; CHECK-NEXT: # %bb.4: # %cmpxchg.store_expected
; CHECK-NEXT: stw r6, 0(r4)
-; CHECK-NEXT: L..BB0_4: # %cmpxchg.continue
+; CHECK-NEXT: L..BB0_5: # %cmpxchg.continue
; CHECK-NEXT: li r3, 0
; CHECK-NEXT: li r4, 1
; CHECK-NEXT: isel r3, r4, r3, 4*cr5+lt
; CHECK-NEXT: stb r3, -17(r1)
; CHECK-NEXT: blr
-; CHECK-NEXT: L..BB0_5:
-; CHECK-NEXT: creqv 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
-; CHECK-NEXT: b L..BB0_4
;
; CHECK64-LABEL: foo:
; CHECK64: # %bb.0: # %entry
@@ -49,12 +49,15 @@ define i32 @foo(ptr noundef %cp, ptr noundef %old, i32 noundef %c) {
; CHECK64-NEXT: bne cr0, L..BB0_2
; CHECK64-NEXT: # %bb.1: # %cmpxchg.fencedstore
; CHECK64-NEXT: stwcx. r5, 0, r3
-; CHECK64-NEXT: beq cr0, L..BB0_5
+; CHECK64-NEXT: creqv 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
+; CHECK64-NEXT: beq cr0, L..BB0_3
; CHECK64-NEXT: L..BB0_2: # %cmpxchg.failure
; CHECK64-NEXT: crxor 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
-; CHECK64-NEXT: # %bb.3: # %cmpxchg.store_expected
+; CHECK64-NEXT: L..BB0_3: # %cmpxchg.end
+; CHECK64-NEXT: bc 12, 4*cr5+lt, L..BB0_5
+; CHECK64-NEXT: # %bb.4: # %cmpxchg.store_expected
; CHECK64-NEXT: stw r6, 0(r4)
-; CHECK64-NEXT: L..BB0_4: # %cmpxchg.continue
+; CHECK64-NEXT: L..BB0_5: # %cmpxchg.continue
; CHECK64-NEXT: li r3, 0
; CHECK64-NEXT: li r4, 1
; CHECK64-NEXT: isel r3, r4, r3, 4*cr5+lt
@@ -63,9 +66,6 @@ define i32 @foo(ptr noundef %cp, ptr noundef %old, i32 noundef %c) {
; CHECK64-NEXT: li r3, 0
; CHECK64-NEXT: isel r3, r4, r3, 4*cr5+lt
; CHECK64-NEXT: blr
-; CHECK64-NEXT: L..BB0_5:
-; CHECK64-NEXT: creqv 4*cr5+lt, 4*cr5+lt, 4*cr5+lt
-; CHECK64-NEXT: b L..BB0_4
entry:
%cp.addr = alloca ptr, align 4
%old.addr = alloca ptr, align 4
diff --git a/llvm/test/CodeGen/PowerPC/atomic-float.ll b/llvm/test/CodeGen/PowerPC/atomic-float.ll
index 600d289..acc9d04 100644
--- a/llvm/test/CodeGen/PowerPC/atomic-float.ll
+++ b/llvm/test/CodeGen/PowerPC/atomic-float.ll
@@ -10,36 +10,35 @@ define float @test_add(ptr %ptr, float %incr) {
; CHECK-64-NEXT: sync
; CHECK-64-NEXT: lfs 0, 0(3)
; CHECK-64-NEXT: b .LBB0_3
-; CHECK-64-NEXT: .LBB0_1: # %cmpxchg.nostore
-; CHECK-64-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-64-NEXT: .LBB0_1: # %cmpxchg.nostore
+; CHECK-64-NEXT: #
; CHECK-64-NEXT: crxor 20, 20, 20
-; CHECK-64-NEXT: .LBB0_2: # %cmpxchg.end
-; CHECK-64-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-64-NEXT: .LBB0_2: # %cmpxchg.end
+; CHECK-64-NEXT: #
; CHECK-64-NEXT: stw 4, -12(1)
; CHECK-64-NEXT: lfs 0, -12(1)
-; CHECK-64-NEXT: bc 12, 20, .LBB0_7
-; CHECK-64-NEXT: .LBB0_3: # %atomicrmw.start
-; CHECK-64-NEXT: # =>This Loop Header: Depth=1
-; CHECK-64-NEXT: # Child Loop BB0_4 Depth 2
+; CHECK-64-NEXT: bc 12, 20, .LBB0_6
+; CHECK-64-NEXT: .LBB0_3: # %atomicrmw.start
+; CHECK-64-NEXT: # =>This Loop Header: Depth=1
+; CHECK-64-NEXT: # Child Loop BB0_4 Depth 2
; CHECK-64-NEXT: fadds 2, 0, 1
; CHECK-64-NEXT: stfs 2, -4(1)
; CHECK-64-NEXT: stfs 0, -8(1)
; CHECK-64-NEXT: lwz 5, -4(1)
; CHECK-64-NEXT: lwz 6, -8(1)
-; CHECK-64-NEXT: .LBB0_4: # %cmpxchg.start
-; CHECK-64-NEXT: # Parent Loop BB0_3 Depth=1
-; CHECK-64-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-64-NEXT: .LBB0_4: # %cmpxchg.start
+; CHECK-64-NEXT: # Parent Loop BB0_3 Depth=1
+; CHECK-64-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-64-NEXT: lwarx 4, 0, 3
-; CHECK-64-NEXT: cmplw 4, 6
-; CHECK-64-NEXT: bne 0, .LBB0_1
-; CHECK-64-NEXT: # %bb.5: # %cmpxchg.fencedstore
-; CHECK-64-NEXT: # in Loop: Header=BB0_4 Depth=2
+; CHECK-64-NEXT: cmplw 4, 6
+; CHECK-64-NEXT: bne 0, .LBB0_1
+; CHECK-64-NEXT: # %bb.5: # %cmpxchg.fencedstore
+; CHECK-64-NEXT: #
; CHECK-64-NEXT: stwcx. 5, 0, 3
-; CHECK-64-NEXT: bne 0, .LBB0_4
-; CHECK-64-NEXT: # %bb.6: # in Loop: Header=BB0_3 Depth=1
; CHECK-64-NEXT: creqv 20, 20, 20
+; CHECK-64-NEXT: bne 0, .LBB0_4
; CHECK-64-NEXT: b .LBB0_2
-; CHECK-64-NEXT: .LBB0_7: # %atomicrmw.end
+; CHECK-64-NEXT: .LBB0_6: # %atomicrmw.end
; CHECK-64-NEXT: fmr 1, 0
; CHECK-64-NEXT: lwsync
; CHECK-64-NEXT: blr
@@ -51,36 +50,35 @@ define float @test_add(ptr %ptr, float %incr) {
; CHECK-32-NEXT: sync
; CHECK-32-NEXT: lfs 0, 0(3)
; CHECK-32-NEXT: b .LBB0_3
-; CHECK-32-NEXT: .LBB0_1: # %cmpxchg.nostore
-; CHECK-32-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-32-NEXT: .LBB0_1: # %cmpxchg.nostore
+; CHECK-32-NEXT: #
; CHECK-32-NEXT: crxor 20, 20, 20
-; CHECK-32-NEXT: .LBB0_2: # %cmpxchg.end
-; CHECK-32-NEXT: # in Loop: Header=BB0_3 Depth=1
+; CHECK-32-NEXT: .LBB0_2: # %cmpxchg.end
+; CHECK-32-NEXT: #
; CHECK-32-NEXT: stw 4, 20(1)
; CHECK-32-NEXT: lfs 0, 20(1)
-; CHECK-32-NEXT: bc 12, 20, .LBB0_7
-; CHECK-32-NEXT: .LBB0_3: # %atomicrmw.start
-; CHECK-32-NEXT: # =>This Loop Header: Depth=1
-; CHECK-32-NEXT: # Child Loop BB0_4 Depth 2
+; CHECK-32-NEXT: bc 12, 20, .LBB0_6
+; CHECK-32-NEXT: .LBB0_3: # %atomicrmw.start
+; CHECK-32-NEXT: # =>This Loop Header: Depth=1
+; CHECK-32-NEXT: # Child Loop BB0_4 Depth 2
; CHECK-32-NEXT: fadds 2, 0, 1
; CHECK-32-NEXT: stfs 2, 28(1)
; CHECK-32-NEXT: stfs 0, 24(1)
; CHECK-32-NEXT: lwz 5, 28(1)
; CHECK-32-NEXT: lwz 6, 24(1)
-; CHECK-32-NEXT: .LBB0_4: # %cmpxchg.start
-; CHECK-32-NEXT: # Parent Loop BB0_3 Depth=1
-; CHECK-32-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-32-NEXT: .LBB0_4: # %cmpxchg.start
+; CHECK-32-NEXT: # Parent Loop BB0_3 Depth=1
+; CHECK-32-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-32-NEXT: lwarx 4, 0, 3
-; CHECK-32-NEXT: cmplw 4, 6
-; CHECK-32-NEXT: bne 0, .LBB0_1
-; CHECK-32-NEXT: # %bb.5: # %cmpxchg.fencedstore
-; CHECK-32-NEXT: # in Loop: Header=BB0_4 Depth=2
+; CHECK-32-NEXT: cmplw 4, 6
+; CHECK-32-NEXT: bne 0, .LBB0_1
+; CHECK-32-NEXT: # %bb.5: # %cmpxchg.fencedstore
+; CHECK-32-NEXT: #
; CHECK-32-NEXT: stwcx. 5, 0, 3
-; CHECK-32-NEXT: bne 0, .LBB0_4
-; CHECK-32-NEXT: # %bb.6: # in Loop: Header=BB0_3 Depth=1
; CHECK-32-NEXT: creqv 20, 20, 20
+; CHECK-32-NEXT: bne 0, .LBB0_4
; CHECK-32-NEXT: b .LBB0_2
-; CHECK-32-NEXT: .LBB0_7: # %atomicrmw.end
+; CHECK-32-NEXT: .LBB0_6: # %atomicrmw.end
; CHECK-32-NEXT: fmr 1, 0
; CHECK-32-NEXT: lwsync
; CHECK-32-NEXT: addi 1, 1, 32
diff --git a/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll b/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
index 27a26aa..b5be0e6 100644
--- a/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
+++ b/llvm/test/CodeGen/PowerPC/atomicrmw-cond-sub-clamp.ll
@@ -6,45 +6,49 @@ define i8 @atomicrmw_usub_cond_i8(ptr %ptr, i8 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: not 3, 3
+; CHECK-NEXT: not 3, 3
; CHECK-NEXT: li 6, 255
; CHECK-NEXT: lwz 8, 0(5)
; CHECK-NEXT: rlwinm 3, 3, 3, 27, 28
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 7, 4, 24
-; CHECK-NEXT: b .LBB0_2
-; CHECK-NEXT: .LBB0_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: .LBB0_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB0_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 7, 4, 24
+; CHECK-NEXT: b .LBB0_3
+; CHECK-NEXT: .LBB0_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB0_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 8, 9
+; CHECK-NEXT: bc 12, 20, .LBB0_8
+; CHECK-NEXT: .LBB0_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB0_6 Depth 2
; CHECK-NEXT: srw 9, 8, 3
-; CHECK-NEXT: clrlwi 10, 9, 24
-; CHECK-NEXT: cmplw 10, 7
-; CHECK-NEXT: blt 0, .LBB0_4
-; CHECK-NEXT: # %bb.3: # in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: sub 9, 9, 4
-; CHECK-NEXT: .LBB0_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: clrlwi 9, 9, 24
+; CHECK-NEXT: clrlwi 10, 9, 24
+; CHECK-NEXT: cmplw 10, 7
+; CHECK-NEXT: blt 0, .LBB0_5
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: sub 9, 9, 4
+; CHECK-NEXT: .LBB0_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: clrlwi 9, 9, 24
; CHECK-NEXT: slw 9, 9, 3
; CHECK-NEXT: and 10, 8, 6
; CHECK-NEXT: or 10, 10, 9
-; CHECK-NEXT: .LBB0_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB0_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB0_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB0_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 9, 0, 5
-; CHECK-NEXT: cmplw 9, 8
-; CHECK-NEXT: bne 0, .LBB0_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB0_5 Depth=2
+; CHECK-NEXT: cmplw 9, 8
+; CHECK-NEXT: bne 0, .LBB0_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 10, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB0_6
+; CHECK-NEXT: b .LBB0_2
+; CHECK-NEXT: .LBB0_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 9, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -57,47 +61,51 @@ define i16 @atomicrmw_usub_cond_i16(ptr %ptr, i16 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: clrlwi 3, 3, 30
+; CHECK-NEXT: clrlwi 3, 3, 30
; CHECK-NEXT: lis 6, 0
; CHECK-NEXT: xori 3, 3, 2
; CHECK-NEXT: lwz 8, 0(5)
; CHECK-NEXT: ori 6, 6, 65535
; CHECK-NEXT: slwi 3, 3, 3
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 7, 4, 16
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: .LBB1_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB1_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 7, 4, 16
+; CHECK-NEXT: b .LBB1_3
+; CHECK-NEXT: .LBB1_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB1_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 8, 9
+; CHECK-NEXT: bc 12, 20, .LBB1_8
+; CHECK-NEXT: .LBB1_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB1_6 Depth 2
; CHECK-NEXT: srw 9, 8, 3
-; CHECK-NEXT: clrlwi 10, 9, 16
-; CHECK-NEXT: cmplw 10, 7
-; CHECK-NEXT: blt 0, .LBB1_4
-; CHECK-NEXT: # %bb.3: # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: sub 9, 9, 4
-; CHECK-NEXT: .LBB1_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: clrlwi 9, 9, 16
+; CHECK-NEXT: clrlwi 10, 9, 16
+; CHECK-NEXT: cmplw 10, 7
+; CHECK-NEXT: blt 0, .LBB1_5
+; CHECK-NEXT: # %bb.4:
+; CHECK-NEXT: sub 9, 9, 4
+; CHECK-NEXT: .LBB1_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: clrlwi 9, 9, 16
; CHECK-NEXT: slw 9, 9, 3
; CHECK-NEXT: and 10, 8, 6
; CHECK-NEXT: or 10, 10, 9
-; CHECK-NEXT: .LBB1_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB1_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB1_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB1_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 9, 0, 5
-; CHECK-NEXT: cmplw 9, 8
-; CHECK-NEXT: bne 0, .LBB1_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB1_5 Depth=2
+; CHECK-NEXT: cmplw 9, 8
+; CHECK-NEXT: bne 0, .LBB1_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 10, 0, 5
-; CHECK-NEXT: bne 0, .LBB1_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB1_6
+; CHECK-NEXT: b .LBB1_2
+; CHECK-NEXT: .LBB1_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 9, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -110,34 +118,38 @@ define i32 @atomicrmw_usub_cond_i32(ptr %ptr, i32 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: lwz 6, 0(3)
-; CHECK-NEXT: b .LBB2_2
-; CHECK-NEXT: .LBB2_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB2_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB2_5 Depth 2
-; CHECK-NEXT: cmplw 6, 4
-; CHECK-NEXT: bge 0, .LBB2_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
-; CHECK-NEXT: mr 7, 6
-; CHECK-NEXT: b .LBB2_5
-; CHECK-NEXT: .LBB2_4: # in Loop: Header=BB2_2 Depth=1
-; CHECK-NEXT: sub 7, 6, 4
-; CHECK-NEXT: .LBB2_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB2_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: b .LBB2_3
+; CHECK-NEXT: .LBB2_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB2_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB2_8
+; CHECK-NEXT: .LBB2_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB2_6 Depth 2
+; CHECK-NEXT: cmplw 6, 4
+; CHECK-NEXT: bge 0, .LBB2_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 6
+; CHECK-NEXT: b .LBB2_6
+; CHECK-NEXT: .LBB2_5:
+; CHECK-NEXT: sub 7, 6, 4
+; CHECK-NEXT: .LBB2_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB2_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 5, 0, 3
-; CHECK-NEXT: cmplw 5, 6
-; CHECK-NEXT: bne 0, .LBB2_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB2_5 Depth=2
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: bne 0, .LBB2_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB2_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB2_6
+; CHECK-NEXT: b .LBB2_2
+; CHECK-NEXT: .LBB2_8: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -150,34 +162,38 @@ define i64 @atomicrmw_usub_cond_i64(ptr %ptr, i64 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: ld 6, 0(3)
-; CHECK-NEXT: b .LBB3_2
-; CHECK-NEXT: .LBB3_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB3_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB3_5 Depth 2
-; CHECK-NEXT: cmpld 6, 4
-; CHECK-NEXT: bge 0, .LBB3_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1
-; CHECK-NEXT: mr 7, 6
-; CHECK-NEXT: b .LBB3_5
-; CHECK-NEXT: .LBB3_4: # in Loop: Header=BB3_2 Depth=1
-; CHECK-NEXT: sub 7, 6, 4
-; CHECK-NEXT: .LBB3_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB3_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: b .LBB3_3
+; CHECK-NEXT: .LBB3_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB3_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB3_8
+; CHECK-NEXT: .LBB3_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB3_6 Depth 2
+; CHECK-NEXT: cmpld 6, 4
+; CHECK-NEXT: bge 0, .LBB3_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 6
+; CHECK-NEXT: b .LBB3_6
+; CHECK-NEXT: .LBB3_5:
+; CHECK-NEXT: sub 7, 6, 4
+; CHECK-NEXT: .LBB3_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB3_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: ldarx 5, 0, 3
-; CHECK-NEXT: cmpld 5, 6
-; CHECK-NEXT: bne 0, .LBB3_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB3_5 Depth=2
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: bne 0, .LBB3_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB3_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB3_6
+; CHECK-NEXT: b .LBB3_2
+; CHECK-NEXT: .LBB3_8: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -190,47 +206,51 @@ define i8 @atomicrmw_usub_sat_i8(ptr %ptr, i8 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: not 3, 3
+; CHECK-NEXT: not 3, 3
; CHECK-NEXT: li 6, 255
; CHECK-NEXT: lwz 7, 0(5)
; CHECK-NEXT: rlwinm 3, 3, 3, 27, 28
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 4, 4, 24
-; CHECK-NEXT: b .LBB4_2
-; CHECK-NEXT: .LBB4_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: .LBB4_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB4_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 4, 4, 24
+; CHECK-NEXT: b .LBB4_3
+; CHECK-NEXT: .LBB4_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB4_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 8
+; CHECK-NEXT: bc 12, 20, .LBB4_8
+; CHECK-NEXT: .LBB4_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB4_6 Depth 2
; CHECK-NEXT: srw 8, 7, 3
-; CHECK-NEXT: clrlwi 9, 8, 24
-; CHECK-NEXT: sub 8, 9, 4
-; CHECK-NEXT: cmplw 8, 9
+; CHECK-NEXT: clrlwi 9, 8, 24
+; CHECK-NEXT: sub 8, 9, 4
+; CHECK-NEXT: cmplw 8, 9
; CHECK-NEXT: li 9, 0
-; CHECK-NEXT: bgt 0, .LBB4_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1
-; CHECK-NEXT: mr 9, 8
-; CHECK-NEXT: .LBB4_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1
+; CHECK-NEXT: bgt 0, .LBB4_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 9, 8
+; CHECK-NEXT: .LBB4_5: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: slw 8, 9, 3
; CHECK-NEXT: and 9, 7, 6
; CHECK-NEXT: or 9, 9, 8
-; CHECK-NEXT: .LBB4_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB4_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB4_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB4_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 8, 0, 5
-; CHECK-NEXT: cmplw 8, 7
-; CHECK-NEXT: bne 0, .LBB4_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB4_5 Depth=2
+; CHECK-NEXT: cmplw 8, 7
+; CHECK-NEXT: bne 0, .LBB4_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 9, 0, 5
-; CHECK-NEXT: bne 0, .LBB4_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB4_6
+; CHECK-NEXT: b .LBB4_2
+; CHECK-NEXT: .LBB4_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 8, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -243,49 +263,53 @@ define i16 @atomicrmw_usub_sat_i16(ptr %ptr, i16 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: clrlwi 3, 3, 30
+; CHECK-NEXT: clrlwi 3, 3, 30
; CHECK-NEXT: lis 6, 0
; CHECK-NEXT: xori 3, 3, 2
; CHECK-NEXT: lwz 7, 0(5)
; CHECK-NEXT: ori 6, 6, 65535
; CHECK-NEXT: slwi 3, 3, 3
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 4, 4, 16
-; CHECK-NEXT: b .LBB5_2
-; CHECK-NEXT: .LBB5_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: .LBB5_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB5_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 4, 4, 16
+; CHECK-NEXT: b .LBB5_3
+; CHECK-NEXT: .LBB5_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB5_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 8
+; CHECK-NEXT: bc 12, 20, .LBB5_8
+; CHECK-NEXT: .LBB5_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB5_6 Depth 2
; CHECK-NEXT: srw 8, 7, 3
-; CHECK-NEXT: clrlwi 9, 8, 16
-; CHECK-NEXT: sub 8, 9, 4
-; CHECK-NEXT: cmplw 8, 9
+; CHECK-NEXT: clrlwi 9, 8, 16
+; CHECK-NEXT: sub 8, 9, 4
+; CHECK-NEXT: cmplw 8, 9
; CHECK-NEXT: li 9, 0
-; CHECK-NEXT: bgt 0, .LBB5_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1
-; CHECK-NEXT: mr 9, 8
-; CHECK-NEXT: .LBB5_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1
+; CHECK-NEXT: bgt 0, .LBB5_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 9, 8
+; CHECK-NEXT: .LBB5_5: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: slw 8, 9, 3
; CHECK-NEXT: and 9, 7, 6
; CHECK-NEXT: or 9, 9, 8
-; CHECK-NEXT: .LBB5_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB5_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB5_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB5_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 8, 0, 5
-; CHECK-NEXT: cmplw 8, 7
-; CHECK-NEXT: bne 0, .LBB5_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB5_5 Depth=2
+; CHECK-NEXT: cmplw 8, 7
+; CHECK-NEXT: bne 0, .LBB5_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 9, 0, 5
-; CHECK-NEXT: bne 0, .LBB5_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB5_6
+; CHECK-NEXT: b .LBB5_2
+; CHECK-NEXT: .LBB5_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 8, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -298,33 +322,37 @@ define i32 @atomicrmw_usub_sat_i32(ptr %ptr, i32 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: lwz 6, 0(3)
-; CHECK-NEXT: b .LBB6_2
-; CHECK-NEXT: .LBB6_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB6_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB6_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB6_4 Depth 2
-; CHECK-NEXT: sub 5, 6, 4
-; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: b .LBB6_3
+; CHECK-NEXT: .LBB6_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB6_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB6_7
+; CHECK-NEXT: .LBB6_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB6_5 Depth 2
+; CHECK-NEXT: sub 5, 6, 4
+; CHECK-NEXT: cmplw 5, 6
; CHECK-NEXT: li 7, 0
-; CHECK-NEXT: bgt 0, .LBB6_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB6_2 Depth=1
-; CHECK-NEXT: mr 7, 5
-; CHECK-NEXT: .LBB6_4: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB6_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: bgt 0, .LBB6_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 5
+; CHECK-NEXT: .LBB6_5: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB6_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 5, 0, 3
-; CHECK-NEXT: cmplw 5, 6
-; CHECK-NEXT: bne 0, .LBB6_1
-; CHECK-NEXT: # %bb.5: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB6_4 Depth=2
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: bne 0, .LBB6_1
+; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB6_4
-; CHECK-NEXT: # %bb.6:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.7: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB6_5
+; CHECK-NEXT: b .LBB6_2
+; CHECK-NEXT: .LBB6_7: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -337,33 +365,37 @@ define i64 @atomicrmw_usub_sat_i64(ptr %ptr, i64 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: ld 6, 0(3)
-; CHECK-NEXT: b .LBB7_2
-; CHECK-NEXT: .LBB7_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB7_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB7_4 Depth 2
-; CHECK-NEXT: subc 5, 6, 4
+; CHECK-NEXT: b .LBB7_3
+; CHECK-NEXT: .LBB7_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB7_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB7_7
+; CHECK-NEXT: .LBB7_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB7_5 Depth 2
+; CHECK-NEXT: subc 5, 6, 4
; CHECK-NEXT: li 7, 0
; CHECK-NEXT: addze. 8, 7
-; CHECK-NEXT: beq 0, .LBB7_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1
-; CHECK-NEXT: mr 7, 5
-; CHECK-NEXT: .LBB7_4: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB7_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: beq 0, .LBB7_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 5
+; CHECK-NEXT: .LBB7_5: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB7_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: ldarx 5, 0, 3
-; CHECK-NEXT: cmpld 5, 6
-; CHECK-NEXT: bne 0, .LBB7_1
-; CHECK-NEXT: # %bb.5: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB7_4 Depth=2
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: bne 0, .LBB7_1
+; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB7_4
-; CHECK-NEXT: # %bb.6:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.7: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB7_5
+; CHECK-NEXT: b .LBB7_2
+; CHECK-NEXT: .LBB7_7: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
diff --git a/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll b/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll
index 6ced47b..d692404 100644
--- a/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll
+++ b/llvm/test/CodeGen/PowerPC/atomicrmw-uinc-udec-wrap.ll
@@ -6,47 +6,51 @@ define i8 @atomicrmw_uinc_wrap_i8(ptr %ptr, i8 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: not 3, 3
+; CHECK-NEXT: not 3, 3
; CHECK-NEXT: li 6, 255
; CHECK-NEXT: lwz 7, 0(5)
; CHECK-NEXT: rlwinm 3, 3, 3, 27, 28
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 4, 4, 24
-; CHECK-NEXT: b .LBB0_2
-; CHECK-NEXT: .LBB0_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: .LBB0_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB0_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 4, 4, 24
+; CHECK-NEXT: b .LBB0_3
+; CHECK-NEXT: .LBB0_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB0_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 8
+; CHECK-NEXT: bc 12, 20, .LBB0_8
+; CHECK-NEXT: .LBB0_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB0_6 Depth 2
; CHECK-NEXT: srw 8, 7, 3
-; CHECK-NEXT: clrlwi 9, 8, 24
-; CHECK-NEXT: cmplw 9, 4
+; CHECK-NEXT: clrlwi 9, 8, 24
+; CHECK-NEXT: cmplw 9, 4
; CHECK-NEXT: li 9, 0
-; CHECK-NEXT: bge 0, .LBB0_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1
+; CHECK-NEXT: bge 0, .LBB0_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 9, 8, 1
-; CHECK-NEXT: .LBB0_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB0_2 Depth=1
-; CHECK-NEXT: clrlwi 8, 9, 24
+; CHECK-NEXT: .LBB0_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: clrlwi 8, 9, 24
; CHECK-NEXT: slw 8, 8, 3
; CHECK-NEXT: and 9, 7, 6
; CHECK-NEXT: or 9, 9, 8
-; CHECK-NEXT: .LBB0_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB0_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB0_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB0_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 8, 0, 5
-; CHECK-NEXT: cmplw 8, 7
-; CHECK-NEXT: bne 0, .LBB0_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB0_5 Depth=2
+; CHECK-NEXT: cmplw 8, 7
+; CHECK-NEXT: bne 0, .LBB0_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 9, 0, 5
-; CHECK-NEXT: bne 0, .LBB0_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB0_6
+; CHECK-NEXT: b .LBB0_2
+; CHECK-NEXT: .LBB0_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 8, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -59,49 +63,53 @@ define i16 @atomicrmw_uinc_wrap_i16(ptr %ptr, i16 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: clrlwi 3, 3, 30
+; CHECK-NEXT: clrlwi 3, 3, 30
; CHECK-NEXT: lis 6, 0
; CHECK-NEXT: xori 3, 3, 2
; CHECK-NEXT: lwz 7, 0(5)
; CHECK-NEXT: ori 6, 6, 65535
; CHECK-NEXT: slwi 3, 3, 3
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 4, 4, 16
-; CHECK-NEXT: b .LBB1_2
-; CHECK-NEXT: .LBB1_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: .LBB1_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB1_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 4, 4, 16
+; CHECK-NEXT: b .LBB1_3
+; CHECK-NEXT: .LBB1_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB1_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 7, 8
+; CHECK-NEXT: bc 12, 20, .LBB1_8
+; CHECK-NEXT: .LBB1_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB1_6 Depth 2
; CHECK-NEXT: srw 8, 7, 3
-; CHECK-NEXT: clrlwi 9, 8, 16
-; CHECK-NEXT: cmplw 9, 4
+; CHECK-NEXT: clrlwi 9, 8, 16
+; CHECK-NEXT: cmplw 9, 4
; CHECK-NEXT: li 9, 0
-; CHECK-NEXT: bge 0, .LBB1_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1
+; CHECK-NEXT: bge 0, .LBB1_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 9, 8, 1
-; CHECK-NEXT: .LBB1_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB1_2 Depth=1
-; CHECK-NEXT: clrlwi 8, 9, 16
+; CHECK-NEXT: .LBB1_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: clrlwi 8, 9, 16
; CHECK-NEXT: slw 8, 8, 3
; CHECK-NEXT: and 9, 7, 6
; CHECK-NEXT: or 9, 9, 8
-; CHECK-NEXT: .LBB1_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB1_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB1_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB1_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 8, 0, 5
-; CHECK-NEXT: cmplw 8, 7
-; CHECK-NEXT: bne 0, .LBB1_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB1_5 Depth=2
+; CHECK-NEXT: cmplw 8, 7
+; CHECK-NEXT: bne 0, .LBB1_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 9, 0, 5
-; CHECK-NEXT: bne 0, .LBB1_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 7, 8
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB1_6
+; CHECK-NEXT: b .LBB1_2
+; CHECK-NEXT: .LBB1_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 8, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -114,32 +122,36 @@ define i32 @atomicrmw_uinc_wrap_i32(ptr %ptr, i32 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: lwz 6, 0(3)
-; CHECK-NEXT: b .LBB2_2
-; CHECK-NEXT: .LBB2_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB2_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB2_4 Depth 2
-; CHECK-NEXT: cmplw 6, 4
+; CHECK-NEXT: b .LBB2_3
+; CHECK-NEXT: .LBB2_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB2_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB2_7
+; CHECK-NEXT: .LBB2_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB2_5 Depth 2
+; CHECK-NEXT: cmplw 6, 4
; CHECK-NEXT: li 7, 0
-; CHECK-NEXT: bge 0, .LBB2_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB2_2 Depth=1
+; CHECK-NEXT: bge 0, .LBB2_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 7, 6, 1
-; CHECK-NEXT: .LBB2_4: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB2_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB2_5: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB2_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 5, 0, 3
-; CHECK-NEXT: cmplw 5, 6
-; CHECK-NEXT: bne 0, .LBB2_1
-; CHECK-NEXT: # %bb.5: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB2_4 Depth=2
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: bne 0, .LBB2_1
+; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB2_4
-; CHECK-NEXT: # %bb.6:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.7: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB2_5
+; CHECK-NEXT: b .LBB2_2
+; CHECK-NEXT: .LBB2_7: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -152,32 +164,36 @@ define i64 @atomicrmw_uinc_wrap_i64(ptr %ptr, i64 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: ld 6, 0(3)
-; CHECK-NEXT: b .LBB3_2
-; CHECK-NEXT: .LBB3_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB3_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB3_4 Depth 2
-; CHECK-NEXT: cmpld 6, 4
+; CHECK-NEXT: b .LBB3_3
+; CHECK-NEXT: .LBB3_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB3_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB3_7
+; CHECK-NEXT: .LBB3_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB3_5 Depth 2
+; CHECK-NEXT: cmpld 6, 4
; CHECK-NEXT: li 7, 0
-; CHECK-NEXT: bge 0, .LBB3_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB3_2 Depth=1
+; CHECK-NEXT: bge 0, .LBB3_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 7, 6, 1
-; CHECK-NEXT: .LBB3_4: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB3_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB3_5: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB3_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: ldarx 5, 0, 3
-; CHECK-NEXT: cmpld 5, 6
-; CHECK-NEXT: bne 0, .LBB3_1
-; CHECK-NEXT: # %bb.5: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB3_4 Depth=2
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: bne 0, .LBB3_1
+; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB3_4
-; CHECK-NEXT: # %bb.6:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.7: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB3_5
+; CHECK-NEXT: b .LBB3_2
+; CHECK-NEXT: .LBB3_7: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -190,48 +206,52 @@ define i8 @atomicrmw_udec_wrap_i8(ptr %ptr, i8 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: not 3, 3
+; CHECK-NEXT: not 3, 3
; CHECK-NEXT: li 6, 255
; CHECK-NEXT: lwz 8, 0(5)
; CHECK-NEXT: rlwinm 3, 3, 3, 27, 28
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 7, 4, 24
-; CHECK-NEXT: b .LBB4_2
-; CHECK-NEXT: .LBB4_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: .LBB4_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB4_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 7, 4, 24
+; CHECK-NEXT: b .LBB4_3
+; CHECK-NEXT: .LBB4_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB4_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 8, 9
+; CHECK-NEXT: bc 12, 20, .LBB4_8
+; CHECK-NEXT: .LBB4_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB4_6 Depth 2
; CHECK-NEXT: srw 9, 8, 3
; CHECK-NEXT: andi. 10, 9, 255
; CHECK-NEXT: cmplw 1, 10, 7
; CHECK-NEXT: cror 20, 2, 5
-; CHECK-NEXT: mr 10, 4
-; CHECK-NEXT: bc 12, 20, .LBB4_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1
+; CHECK-NEXT: mr 10, 4
+; CHECK-NEXT: bc 12, 20, .LBB4_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 10, 9, -1
-; CHECK-NEXT: .LBB4_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB4_2 Depth=1
-; CHECK-NEXT: clrlwi 9, 10, 24
+; CHECK-NEXT: .LBB4_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: clrlwi 9, 10, 24
; CHECK-NEXT: slw 9, 9, 3
; CHECK-NEXT: and 10, 8, 6
; CHECK-NEXT: or 10, 10, 9
-; CHECK-NEXT: .LBB4_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB4_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB4_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB4_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 9, 0, 5
-; CHECK-NEXT: cmplw 9, 8
-; CHECK-NEXT: bne 0, .LBB4_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB4_5 Depth=2
+; CHECK-NEXT: cmplw 9, 8
+; CHECK-NEXT: bne 0, .LBB4_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 10, 0, 5
-; CHECK-NEXT: bne 0, .LBB4_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB4_6
+; CHECK-NEXT: b .LBB4_2
+; CHECK-NEXT: .LBB4_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 9, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -244,50 +264,54 @@ define i16 @atomicrmw_udec_wrap_i16(ptr %ptr, i16 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: rldicr 5, 3, 0, 61
-; CHECK-NEXT: clrlwi 3, 3, 30
+; CHECK-NEXT: clrlwi 3, 3, 30
; CHECK-NEXT: lis 6, 0
; CHECK-NEXT: xori 3, 3, 2
; CHECK-NEXT: lwz 8, 0(5)
; CHECK-NEXT: ori 6, 6, 65535
; CHECK-NEXT: slwi 3, 3, 3
; CHECK-NEXT: slw 6, 6, 3
-; CHECK-NEXT: not 6, 6
-; CHECK-NEXT: clrlwi 7, 4, 16
-; CHECK-NEXT: b .LBB5_2
-; CHECK-NEXT: .LBB5_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: .LBB5_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB5_5 Depth 2
+; CHECK-NEXT: not 6, 6
+; CHECK-NEXT: clrlwi 7, 4, 16
+; CHECK-NEXT: b .LBB5_3
+; CHECK-NEXT: .LBB5_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB5_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 8, 9
+; CHECK-NEXT: bc 12, 20, .LBB5_8
+; CHECK-NEXT: .LBB5_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB5_6 Depth 2
; CHECK-NEXT: srw 9, 8, 3
; CHECK-NEXT: andi. 10, 9, 65535
; CHECK-NEXT: cmplw 1, 10, 7
; CHECK-NEXT: cror 20, 2, 5
-; CHECK-NEXT: mr 10, 4
-; CHECK-NEXT: bc 12, 20, .LBB5_4
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1
+; CHECK-NEXT: mr 10, 4
+; CHECK-NEXT: bc 12, 20, .LBB5_5
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 10, 9, -1
-; CHECK-NEXT: .LBB5_4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB5_2 Depth=1
-; CHECK-NEXT: clrlwi 9, 10, 16
+; CHECK-NEXT: .LBB5_5: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: clrlwi 9, 10, 16
; CHECK-NEXT: slw 9, 9, 3
; CHECK-NEXT: and 10, 8, 6
; CHECK-NEXT: or 10, 10, 9
-; CHECK-NEXT: .LBB5_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB5_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB5_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB5_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 9, 0, 5
-; CHECK-NEXT: cmplw 9, 8
-; CHECK-NEXT: bne 0, .LBB5_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB5_5 Depth=2
+; CHECK-NEXT: cmplw 9, 8
+; CHECK-NEXT: bne 0, .LBB5_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 10, 0, 5
-; CHECK-NEXT: bne 0, .LBB5_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 8, 9
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB5_6
+; CHECK-NEXT: b .LBB5_2
+; CHECK-NEXT: .LBB5_8: # %atomicrmw.end
; CHECK-NEXT: srw 3, 9, 3
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -300,37 +324,41 @@ define i32 @atomicrmw_udec_wrap_i32(ptr %ptr, i32 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: lwz 6, 0(3)
-; CHECK-NEXT: b .LBB6_2
-; CHECK-NEXT: .LBB6_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB6_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB6_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB6_5 Depth 2
-; CHECK-NEXT: cmpwi 6, 0
-; CHECK-NEXT: mr 7, 4
-; CHECK-NEXT: bc 12, 2, .LBB6_5
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB6_2 Depth=1
-; CHECK-NEXT: cmplw 6, 4
-; CHECK-NEXT: mr 7, 4
-; CHECK-NEXT: bc 12, 1, .LBB6_5
-; CHECK-NEXT: # %bb.4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB6_2 Depth=1
+; CHECK-NEXT: b .LBB6_3
+; CHECK-NEXT: .LBB6_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB6_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB6_8
+; CHECK-NEXT: .LBB6_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB6_6 Depth 2
+; CHECK-NEXT: cmpwi 6, 0
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 2, .LBB6_6
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: cmplw 6, 4
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 1, .LBB6_6
+; CHECK-NEXT: # %bb.5: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 7, 6, -1
-; CHECK-NEXT: .LBB6_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB6_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB6_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB6_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: lwarx 5, 0, 3
-; CHECK-NEXT: cmplw 5, 6
-; CHECK-NEXT: bne 0, .LBB6_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB6_5 Depth=2
+; CHECK-NEXT: cmplw 5, 6
+; CHECK-NEXT: bne 0, .LBB6_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stwcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB6_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB6_6
+; CHECK-NEXT: b .LBB6_2
+; CHECK-NEXT: .LBB6_8: # %atomicrmw.end
; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
@@ -343,38 +371,42 @@ define i64 @atomicrmw_udec_wrap_i64(ptr %ptr, i64 %val) {
; CHECK: # %bb.0:
; CHECK-NEXT: sync
; CHECK-NEXT: ld 6, 0(3)
-; CHECK-NEXT: b .LBB7_2
-; CHECK-NEXT: .LBB7_1: # %cmpxchg.nostore
-; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: .LBB7_2: # %atomicrmw.start
-; CHECK-NEXT: # =>This Loop Header: Depth=1
-; CHECK-NEXT: # Child Loop BB7_5 Depth 2
-; CHECK-NEXT: cmpdi 6, 0
-; CHECK-NEXT: mr 7, 4
-; CHECK-NEXT: bc 12, 2, .LBB7_5
-; CHECK-NEXT: # %bb.3: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1
-; CHECK-NEXT: cmpld 6, 4
-; CHECK-NEXT: mr 7, 4
-; CHECK-NEXT: bc 12, 1, .LBB7_5
-; CHECK-NEXT: # %bb.4: # %atomicrmw.start
-; CHECK-NEXT: # in Loop: Header=BB7_2 Depth=1
+; CHECK-NEXT: b .LBB7_3
+; CHECK-NEXT: .LBB7_1: # %cmpxchg.nostore
+; CHECK-NEXT: #
+; CHECK-NEXT: crxor 20, 20, 20
+; CHECK-NEXT: .LBB7_2: # %cmpxchg.end
+; CHECK-NEXT: #
+; CHECK-NEXT: mr 6, 5
+; CHECK-NEXT: bc 12, 20, .LBB7_8
+; CHECK-NEXT: .LBB7_3: # %atomicrmw.start
+; CHECK-NEXT: # =>This Loop Header: Depth=1
+; CHECK-NEXT: # Child Loop BB7_6 Depth 2
+; CHECK-NEXT: cmpdi 6, 0
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 2, .LBB7_6
+; CHECK-NEXT: # %bb.4: # %atomicrmw.start
+; CHECK-NEXT: #
+; CHECK-NEXT: cmpld 6, 4
+; CHECK-NEXT: mr 7, 4
+; CHECK-NEXT: bc 12, 1, .LBB7_6
+; CHECK-NEXT: # %bb.5: # %atomicrmw.start
+; CHECK-NEXT: #
; CHECK-NEXT: addi 7, 6, -1
-; CHECK-NEXT: .LBB7_5: # %cmpxchg.start
-; CHECK-NEXT: # Parent Loop BB7_2 Depth=1
-; CHECK-NEXT: # => This Inner Loop Header: Depth=2
+; CHECK-NEXT: .LBB7_6: # %cmpxchg.start
+; CHECK-NEXT: # Parent Loop BB7_3 Depth=1
+; CHECK-NEXT: # => This Inner Loop Header: Depth=2
; CHECK-NEXT: ldarx 5, 0, 3
-; CHECK-NEXT: cmpld 5, 6
-; CHECK-NEXT: bne 0, .LBB7_1
-; CHECK-NEXT: # %bb.6: # %cmpxchg.fencedstore
-; CHECK-NEXT: # in Loop: Header=BB7_5 Depth=2
+; CHECK-NEXT: cmpld 5, 6
+; CHECK-NEXT: bne 0, .LBB7_1
+; CHECK-NEXT: # %bb.7: # %cmpxchg.fencedstore
+; CHECK-NEXT: #
; CHECK-NEXT: stdcx. 7, 0, 3
-; CHECK-NEXT: bne 0, .LBB7_5
-; CHECK-NEXT: # %bb.7:
-; CHECK-NEXT: mr 6, 5
-; CHECK-NEXT: # %bb.8: # %atomicrmw.end
-; CHECK-NEXT: mr 3, 5
+; CHECK-NEXT: creqv 20, 20, 20
+; CHECK-NEXT: bne 0, .LBB7_6
+; CHECK-NEXT: b .LBB7_2
+; CHECK-NEXT: .LBB7_8: # %atomicrmw.end
+; CHECK-NEXT: mr 3, 5
; CHECK-NEXT: lwsync
; CHECK-NEXT: blr
%result = atomicrmw udec_wrap ptr %ptr, i64 %val seq_cst
diff --git a/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll b/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
index 4ca2dc5db..e1b1938 100644
--- a/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
+++ b/llvm/test/CodeGen/PowerPC/p10-spill-crun.ll
@@ -77,12 +77,12 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-NEXT: srwi r3, r28, 7
; CHECK-NEXT: andi. r3, r3, 1
; CHECK-NEXT: crmove 4*cr2+un, gt
-; CHECK-NEXT: bc 12, 4*cr2+eq, .LBB0_7
+; CHECK-NEXT: bc 12, 4*cr2+eq, .LBB0_8
; CHECK-NEXT: # %bb.3: # %bb37
; CHECK-NEXT: lwz r28, 0(r3)
; CHECK-NEXT: bc 12, 4*cr5+lt, .LBB0_5
; CHECK-NEXT: # %bb.4: # %bb37
-; CHECK-NEXT: bc 4, 4*cr5+lt, .LBB0_14
+; CHECK-NEXT: bc 4, 4*cr5+lt, .LBB0_13
; CHECK-NEXT: .LBB0_5: # %bb42
; CHECK-NEXT: paddi r3, 0, global_1@PCREL, 1
; CHECK-NEXT: li r4, 0
@@ -91,15 +91,20 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-NEXT: crnot 4*cr2+lt, eq
; CHECK-NEXT: bl call_5@notoc
; CHECK-NEXT: pld r3, global_2@got@pcrel(0), 1
+; CHECK-NEXT: xxspltidp vs1, 1071644672
; CHECK-NEXT: addi r3, r3, 8682
; CHECK-NEXT: lxsihzx v2, 0, r3
; CHECK-NEXT: vextsh2d v2, v2
; CHECK-NEXT: xscvsxdsp f0, v2
-; CHECK-NEXT: bc 12, 4*cr2+lt, .LBB0_12
+; CHECK-NEXT: bc 12, 4*cr2+lt, .LBB0_7
; CHECK-NEXT: # %bb.6: # %bb42
; CHECK-NEXT: xxspltidp vs1, 1069547520
-; CHECK-NEXT: b .LBB0_13
-; CHECK-NEXT: .LBB0_7: # %bb19
+; CHECK-NEXT: .LBB0_7: # %bb42
+; CHECK-NEXT: xsmulsp f0, f1, f0
+; CHECK-NEXT: xscvdpsxws f0, f0
+; CHECK-NEXT: mffprwz r3, f0
+; CHECK-NEXT: b .LBB0_14
+; CHECK-NEXT: .LBB0_8: # %bb19
; CHECK-NEXT: setnbc r3, 4*cr2+un
; CHECK-NEXT: paddi r4, 0, global_4@PCREL, 1
; CHECK-NEXT: stw r3, 176(r1)
@@ -114,36 +119,29 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-NEXT: cmpwi cr2, r27, 0
; CHECK-NEXT: mcrf cr3, cr0
; CHECK-NEXT: .p2align 5
-; CHECK-NEXT: .LBB0_8: # %bb27
+; CHECK-NEXT: .LBB0_9: # %bb27
; CHECK-NEXT: #
; CHECK-NEXT: mr r3, r30
; CHECK-NEXT: li r4, 0
; CHECK-NEXT: bl call_6@notoc
-; CHECK-NEXT: bc 4, 4*cr4+eq, .LBB0_18
-; CHECK-NEXT: # %bb.9: # %bb31
+; CHECK-NEXT: bc 4, 4*cr4+eq, .LBB0_17
+; CHECK-NEXT: # %bb.10: # %bb31
; CHECK-NEXT: #
-; CHECK-NEXT: bc 4, 4*cr3+eq, .LBB0_18
-; CHECK-NEXT: # %bb.10: # %bb33
+; CHECK-NEXT: bc 4, 4*cr3+eq, .LBB0_17
+; CHECK-NEXT: # %bb.11: # %bb33
; CHECK-NEXT: #
-; CHECK-NEXT: bc 4, 4*cr2+eq, .LBB0_8
-; CHECK-NEXT: # %bb.11: # %bb36
+; CHECK-NEXT: bc 4, 4*cr2+eq, .LBB0_9
+; CHECK-NEXT: # %bb.12: # %bb36
; CHECK-NEXT: stb r3, 181(r1)
; CHECK-NEXT: # implicit-def: $cr2un
; CHECK-NEXT: mfocrf r3, 32
; CHECK-NEXT: lwz r4, 176(r1)
; CHECK-NEXT: rlwimi r3, r4, 21, 11, 11
; CHECK-NEXT: mtocrf 32, r3
-; CHECK-NEXT: b .LBB0_16
-; CHECK-NEXT: .LBB0_12:
-; CHECK-NEXT: xxspltidp vs1, 1071644672
-; CHECK-NEXT: .LBB0_13: # %bb42
-; CHECK-NEXT: xsmulsp f0, f1, f0
-; CHECK-NEXT: xscvdpsxws f0, f0
-; CHECK-NEXT: mffprwz r3, f0
; CHECK-NEXT: b .LBB0_15
-; CHECK-NEXT: .LBB0_14: # %bb41
+; CHECK-NEXT: .LBB0_13: # %bb41
; CHECK-NEXT: # implicit-def: $r3
-; CHECK-NEXT: .LBB0_15: # %bb50
+; CHECK-NEXT: .LBB0_14: # %bb50
; CHECK-NEXT: li r4, 0
; CHECK-NEXT: xxspltidp vs3, -1082130432
; CHECK-NEXT: xxspltidp vs4, -1082130432
@@ -161,9 +159,9 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-NEXT: std r4, 112(r1)
; CHECK-NEXT: li r4, 1024
; CHECK-NEXT: bl call_4@notoc
-; CHECK-NEXT: .LBB0_16: # %bb54
-; CHECK-NEXT: bc 12, 4*cr2+un, .LBB0_19
-; CHECK-NEXT: # %bb.17: # %bb56
+; CHECK-NEXT: .LBB0_15: # %bb54
+; CHECK-NEXT: bc 12, 4*cr2+un, .LBB0_18
+; CHECK-NEXT: # %bb.16: # %bb56
; CHECK-NEXT: ld r30, 208(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r29, 200(r1) # 8-byte Folded Reload
; CHECK-NEXT: ld r28, 192(r1) # 8-byte Folded Reload
@@ -176,9 +174,9 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-NEXT: mtocrf 16, r12
; CHECK-NEXT: mtocrf 8, r12
; CHECK-NEXT: blr
-; CHECK-NEXT: .LBB0_18: # %bb30
+; CHECK-NEXT: .LBB0_17: # %bb30
; CHECK-NEXT: stb r3, 181(r1)
-; CHECK-NEXT: .LBB0_19: # %bb55
+; CHECK-NEXT: .LBB0_18: # %bb55
;
; CHECK-BE-LABEL: P10_Spill_CR_UN:
; CHECK-BE: # %bb.0: # %bb
@@ -226,13 +224,13 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-BE-NEXT: srwi r3, r28, 7
; CHECK-BE-NEXT: andi. r3, r3, 1
; CHECK-BE-NEXT: crmove 4*cr2+un, gt
-; CHECK-BE-NEXT: bc 12, 4*cr2+eq, .LBB0_7
+; CHECK-BE-NEXT: bc 12, 4*cr2+eq, .LBB0_8
; CHECK-BE-NEXT: # %bb.3: # %bb37
; CHECK-BE-NEXT: lwz r28, 0(r3)
; CHECK-BE-NEXT: addis r3, r2, global_1@toc@ha
; CHECK-BE-NEXT: bc 12, 4*cr5+lt, .LBB0_5
; CHECK-BE-NEXT: # %bb.4: # %bb37
-; CHECK-BE-NEXT: bc 4, 4*cr5+lt, .LBB0_14
+; CHECK-BE-NEXT: bc 4, 4*cr5+lt, .LBB0_13
; CHECK-BE-NEXT: .LBB0_5: # %bb42
; CHECK-BE-NEXT: addi r3, r3, global_1@toc@l
; CHECK-BE-NEXT: li r4, 0
@@ -242,16 +240,21 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-BE-NEXT: bl call_5
; CHECK-BE-NEXT: nop
; CHECK-BE-NEXT: addis r3, r2, .LC0@toc@ha
+; CHECK-BE-NEXT: xxspltidp vs1, 1071644672
; CHECK-BE-NEXT: ld r3, .LC0@toc@l(r3)
; CHECK-BE-NEXT: addi r3, r3, 8682
; CHECK-BE-NEXT: lxsihzx v2, 0, r3
; CHECK-BE-NEXT: vextsh2d v2, v2
; CHECK-BE-NEXT: xscvsxdsp f0, v2
-; CHECK-BE-NEXT: bc 12, 4*cr2+lt, .LBB0_12
+; CHECK-BE-NEXT: bc 12, 4*cr2+lt, .LBB0_7
; CHECK-BE-NEXT: # %bb.6: # %bb42
; CHECK-BE-NEXT: xxspltidp vs1, 1069547520
-; CHECK-BE-NEXT: b .LBB0_13
-; CHECK-BE-NEXT: .LBB0_7: # %bb19
+; CHECK-BE-NEXT: .LBB0_7: # %bb42
+; CHECK-BE-NEXT: xsmulsp f0, f1, f0
+; CHECK-BE-NEXT: xscvdpsxws f0, f0
+; CHECK-BE-NEXT: mffprwz r3, f0
+; CHECK-BE-NEXT: b .LBB0_14
+; CHECK-BE-NEXT: .LBB0_8: # %bb19
; CHECK-BE-NEXT: setnbc r3, 4*cr2+un
; CHECK-BE-NEXT: addis r4, r2, global_4@toc@ha
; CHECK-BE-NEXT: stw r3, 192(r1)
@@ -271,37 +274,30 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-BE-NEXT: cmpwi cr2, r27, 0
; CHECK-BE-NEXT: mcrf cr3, cr0
; CHECK-BE-NEXT: .p2align 5
-; CHECK-BE-NEXT: .LBB0_8: # %bb27
+; CHECK-BE-NEXT: .LBB0_9: # %bb27
; CHECK-BE-NEXT: #
; CHECK-BE-NEXT: mr r3, r30
; CHECK-BE-NEXT: li r4, 0
; CHECK-BE-NEXT: bl call_6
; CHECK-BE-NEXT: nop
-; CHECK-BE-NEXT: bc 4, 4*cr4+eq, .LBB0_18
-; CHECK-BE-NEXT: # %bb.9: # %bb31
+; CHECK-BE-NEXT: bc 4, 4*cr4+eq, .LBB0_17
+; CHECK-BE-NEXT: # %bb.10: # %bb31
; CHECK-BE-NEXT: #
-; CHECK-BE-NEXT: bc 4, 4*cr3+eq, .LBB0_18
-; CHECK-BE-NEXT: # %bb.10: # %bb33
+; CHECK-BE-NEXT: bc 4, 4*cr3+eq, .LBB0_17
+; CHECK-BE-NEXT: # %bb.11: # %bb33
; CHECK-BE-NEXT: #
-; CHECK-BE-NEXT: bc 4, 4*cr2+eq, .LBB0_8
-; CHECK-BE-NEXT: # %bb.11: # %bb36
+; CHECK-BE-NEXT: bc 4, 4*cr2+eq, .LBB0_9
+; CHECK-BE-NEXT: # %bb.12: # %bb36
; CHECK-BE-NEXT: stb r3, 197(r1)
; CHECK-BE-NEXT: # implicit-def: $cr2un
; CHECK-BE-NEXT: mfocrf r3, 32
; CHECK-BE-NEXT: lwz r4, 192(r1)
; CHECK-BE-NEXT: rlwimi r3, r4, 21, 11, 11
; CHECK-BE-NEXT: mtocrf 32, r3
-; CHECK-BE-NEXT: b .LBB0_16
-; CHECK-BE-NEXT: .LBB0_12:
-; CHECK-BE-NEXT: xxspltidp vs1, 1071644672
-; CHECK-BE-NEXT: .LBB0_13: # %bb42
-; CHECK-BE-NEXT: xsmulsp f0, f1, f0
-; CHECK-BE-NEXT: xscvdpsxws f0, f0
-; CHECK-BE-NEXT: mffprwz r3, f0
; CHECK-BE-NEXT: b .LBB0_15
-; CHECK-BE-NEXT: .LBB0_14: # %bb41
+; CHECK-BE-NEXT: .LBB0_13: # %bb41
; CHECK-BE-NEXT: # implicit-def: $r3
-; CHECK-BE-NEXT: .LBB0_15: # %bb50
+; CHECK-BE-NEXT: .LBB0_14: # %bb50
; CHECK-BE-NEXT: li r4, 0
; CHECK-BE-NEXT: xxspltidp vs3, -1082130432
; CHECK-BE-NEXT: xxspltidp vs4, -1082130432
@@ -320,9 +316,9 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-BE-NEXT: li r4, 1024
; CHECK-BE-NEXT: bl call_4
; CHECK-BE-NEXT: nop
-; CHECK-BE-NEXT: .LBB0_16: # %bb54
-; CHECK-BE-NEXT: bc 12, 4*cr2+un, .LBB0_19
-; CHECK-BE-NEXT: # %bb.17: # %bb56
+; CHECK-BE-NEXT: .LBB0_15: # %bb54
+; CHECK-BE-NEXT: bc 12, 4*cr2+un, .LBB0_18
+; CHECK-BE-NEXT: # %bb.16: # %bb56
; CHECK-BE-NEXT: ld r30, 224(r1) # 8-byte Folded Reload
; CHECK-BE-NEXT: ld r29, 216(r1) # 8-byte Folded Reload
; CHECK-BE-NEXT: ld r28, 208(r1) # 8-byte Folded Reload
@@ -335,9 +331,9 @@ define dso_local void @P10_Spill_CR_UN(ptr %arg, ptr %arg1, i32 %arg2) local_unn
; CHECK-BE-NEXT: mtocrf 16, r12
; CHECK-BE-NEXT: mtocrf 8, r12
; CHECK-BE-NEXT: blr
-; CHECK-BE-NEXT: .LBB0_18: # %bb30
+; CHECK-BE-NEXT: .LBB0_17: # %bb30
; CHECK-BE-NEXT: stb r3, 197(r1)
-; CHECK-BE-NEXT: .LBB0_19: # %bb55
+; CHECK-BE-NEXT: .LBB0_18: # %bb55
bb:
%tmp = alloca [3 x i8], align 1
%tmp3 = tail call zeroext i8 @call_1(ptr %arg1)
diff --git a/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir b/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir
index ee16a8c..589b295 100644
--- a/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir
+++ b/llvm/test/CodeGen/PowerPC/sink-down-more-instructions-1.mir
@@ -281,157 +281,193 @@ machineFunctionInfo: {}
body: |
; CHECK-LABEL: name: foo
; CHECK: bb.0 (%ir-block.5):
- ; CHECK: successors: %bb.1(0x50000000), %bb.8(0x30000000)
- ; CHECK: liveins: $x3, $x5, $x6, $x7
- ; CHECK: [[COPY:%[0-9]+]]:g8rc = COPY $x7
- ; CHECK: [[COPY1:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY $x6
- ; CHECK: [[COPY2:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY $x5
- ; CHECK: [[COPY3:%[0-9]+]]:g8rc = COPY $x3
- ; CHECK: [[COPY4:%[0-9]+]]:gprc = COPY [[COPY]].sub_32
- ; CHECK: [[CMPWI:%[0-9]+]]:crrc = CMPWI [[COPY4]], 1
- ; CHECK: BCC 12, killed [[CMPWI]], %bb.8
- ; CHECK: B %bb.1
- ; CHECK: bb.1 (%ir-block.7):
- ; CHECK: successors: %bb.18(0x40000000), %bb.2(0x40000000)
- ; CHECK: [[COPY5:%[0-9]+]]:gprc = COPY [[COPY3]].sub_32
- ; CHECK: [[DEF:%[0-9]+]]:g8rc = IMPLICIT_DEF
- ; CHECK: [[INSERT_SUBREG:%[0-9]+]]:g8rc = INSERT_SUBREG [[DEF]], [[COPY4]], %subreg.sub_32
- ; CHECK: [[RLDICL:%[0-9]+]]:g8rc = RLDICL killed [[INSERT_SUBREG]], 0, 32
- ; CHECK: [[CMPLWI:%[0-9]+]]:crrc = CMPLWI [[COPY4]], 1
- ; CHECK: [[CMPLWI1:%[0-9]+]]:crrc = CMPLWI [[COPY5]], 3
- ; CHECK: BCC 68, killed [[CMPLWI]], %bb.2
- ; CHECK: bb.18:
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: [[LI:%[0-9]+]]:gprc = LI 0
- ; CHECK: [[LI1:%[0-9]+]]:gprc = LI 100
- ; CHECK: [[LI8_:%[0-9]+]]:g8rc = LI8 0
- ; CHECK: B %bb.4
- ; CHECK: bb.2 (%ir-block.10):
- ; CHECK: successors: %bb.9(0x80000000)
- ; CHECK: [[RLWINM8_:%[0-9]+]]:g8rc_and_g8rc_nox0 = RLWINM8 [[RLDICL]], 0, 0, 30
- ; CHECK: [[ADDI8_:%[0-9]+]]:g8rc = ADDI8 [[COPY2]], -8
- ; CHECK: [[ADDI8_1:%[0-9]+]]:g8rc = ADDI8 [[COPY1]], -8
- ; CHECK: [[ADDI8_2:%[0-9]+]]:g8rc = nsw ADDI8 killed [[RLWINM8_]], -2
- ; CHECK: [[RLDICL1:%[0-9]+]]:g8rc_and_g8rc_nox0 = RLDICL [[ADDI8_2]], 63, 1
- ; CHECK: [[ADDI8_3:%[0-9]+]]:g8rc = nuw ADDI8 killed [[RLDICL1]], 1
- ; CHECK: MTCTR8loop killed [[ADDI8_3]], implicit-def dead $ctr8
- ; CHECK: [[LI2:%[0-9]+]]:gprc = LI 0
- ; CHECK: [[LI8_1:%[0-9]+]]:g8rc = LI8 0
- ; CHECK: [[LIS:%[0-9]+]]:gprc = LIS 34952
- ; CHECK: [[ORI:%[0-9]+]]:gprc = ORI [[LIS]], 34953
- ; CHECK: [[DEF1:%[0-9]+]]:g8rc = IMPLICIT_DEF
- ; CHECK: [[CMPLWI2:%[0-9]+]]:crrc = CMPLWI [[COPY5]], 1
- ; CHECK: B %bb.9
- ; CHECK: bb.3 (%ir-block.15):
- ; CHECK: successors: %bb.4(0x80000000)
- ; CHECK: [[COPY6:%[0-9]+]]:gprc_and_gprc_nor0 = COPY %32.sub_32
- ; CHECK: [[ADDI:%[0-9]+]]:gprc_and_gprc_nor0 = ADDI [[COPY6]], -2
- ; CHECK: [[ADDI1:%[0-9]+]]:gprc = nuw ADDI [[ADDI]], 102
- ; CHECK: bb.4 (%ir-block.17):
- ; CHECK: successors: %bb.8(0x40000000), %bb.5(0x40000000)
- ; CHECK: [[PHI:%[0-9]+]]:g8rc = PHI [[LI8_]], %bb.18, %32, %bb.3
- ; CHECK: [[PHI1:%[0-9]+]]:gprc = PHI [[LI1]], %bb.18, [[ADDI1]], %bb.3
- ; CHECK: [[PHI2:%[0-9]+]]:gprc = PHI [[LI]], %bb.18, %27, %bb.3
- ; CHECK: [[ANDI8_rec:%[0-9]+]]:g8rc = ANDI8_rec [[RLDICL]], 1, implicit-def $cr0
- ; CHECK: [[COPY7:%[0-9]+]]:crbitrc = COPY $cr0gt
- ; CHECK: BCn killed [[COPY7]], %bb.8
- ; CHECK: B %bb.5
- ; CHECK: bb.5 (%ir-block.23):
- ; CHECK: successors: %bb.7(0x2aaaaaab), %bb.6(0x55555555)
- ; CHECK: [[RLDICR:%[0-9]+]]:g8rc = RLDICR [[PHI]], 2, 61
- ; CHECK: [[LWZX:%[0-9]+]]:gprc = LWZX [[COPY2]], [[RLDICR]] :: (load (s32) from %ir.24, !tbaa !2)
- ; CHECK: [[ADD4_:%[0-9]+]]:gprc = nsw ADD4 killed [[LWZX]], [[PHI2]]
- ; CHECK: BCC 76, [[CMPLWI1]], %bb.7
- ; CHECK: B %bb.6
- ; CHECK: bb.6 (%ir-block.23):
- ; CHECK: successors: %bb.7(0x80000000)
- ; CHECK: [[CMPLWI3:%[0-9]+]]:crrc = CMPLWI [[COPY5]], 1
- ; CHECK: [[COPY8:%[0-9]+]]:gprc = COPY [[PHI]].sub_32
- ; CHECK: [[LIS1:%[0-9]+]]:gprc = LIS 34952
- ; CHECK: [[ORI1:%[0-9]+]]:gprc = ORI killed [[LIS1]], 34953
- ; CHECK: [[MULHWU:%[0-9]+]]:gprc = MULHWU [[COPY8]], killed [[ORI1]]
- ; CHECK: [[RLWINM:%[0-9]+]]:gprc = RLWINM [[MULHWU]], 28, 4, 31
- ; CHECK: [[MULLI:%[0-9]+]]:gprc = MULLI killed [[RLWINM]], 30
- ; CHECK: [[SUBF:%[0-9]+]]:gprc = SUBF killed [[MULLI]], [[COPY8]]
- ; CHECK: [[COPY9:%[0-9]+]]:gprc = COPY [[PHI]].sub_32
- ; CHECK: [[RLWINM1:%[0-9]+]]:gprc_and_gprc_nor0 = RLWINM [[COPY9]], 1, 0, 30
- ; CHECK: [[ISEL:%[0-9]+]]:gprc = ISEL [[RLWINM1]], [[SUBF]], [[CMPLWI3]].sub_eq
- ; CHECK: B %bb.7
- ; CHECK: bb.7 (%ir-block.33):
- ; CHECK: successors: %bb.8(0x80000000)
- ; CHECK: [[PHI3:%[0-9]+]]:gprc = PHI [[PHI1]], %bb.5, [[ISEL]], %bb.6
- ; CHECK: [[ADD4_1:%[0-9]+]]:gprc = nsw ADD4 [[PHI3]], [[ADD4_]]
- ; CHECK: STWX killed [[ADD4_1]], [[COPY1]], [[RLDICR]] :: (store (s32) into %ir.36, !tbaa !2)
- ; CHECK: bb.8 (%ir-block.37):
- ; CHECK: [[LI8_2:%[0-9]+]]:g8rc = LI8 0
- ; CHECK: $x3 = COPY [[LI8_2]]
- ; CHECK: BLR8 implicit $lr8, implicit $rm, implicit $x3
- ; CHECK: bb.9 (%ir-block.38):
- ; CHECK: successors: %bb.11(0x2aaaaaab), %bb.10(0x55555555)
- ; CHECK: [[PHI4:%[0-9]+]]:g8rc_and_g8rc_nox0 = PHI [[LI8_1]], %bb.2, %32, %bb.17
- ; CHECK: [[PHI5:%[0-9]+]]:gprc = PHI [[LI2]], %bb.2, %27, %bb.17
- ; CHECK: [[PHI6:%[0-9]+]]:g8rc_and_g8rc_nox0 = PHI [[ADDI8_]], %bb.2, %55, %bb.17
- ; CHECK: [[PHI7:%[0-9]+]]:g8rc_and_g8rc_nox0 = PHI [[ADDI8_1]], %bb.2, %15, %bb.17
- ; CHECK: [[COPY10:%[0-9]+]]:gprc_and_gprc_nor0 = COPY [[PHI4]].sub_32
- ; CHECK: [[MULHWU1:%[0-9]+]]:gprc = MULHWU [[COPY10]], [[ORI]]
- ; CHECK: [[RLWINM2:%[0-9]+]]:gprc = RLWINM [[MULHWU1]], 28, 4, 31
- ; CHECK: [[MULLI1:%[0-9]+]]:gprc = nsw MULLI killed [[RLWINM2]], -30
- ; CHECK: [[INSERT_SUBREG1:%[0-9]+]]:g8rc = INSERT_SUBREG [[DEF1]], killed [[MULLI1]], %subreg.sub_32
- ; CHECK: [[RLDICL2:%[0-9]+]]:g8rc = RLDICL killed [[INSERT_SUBREG1]], 0, 32
- ; CHECK: BCC 76, [[CMPLWI1]], %bb.11
- ; CHECK: B %bb.10
- ; CHECK: bb.10 (%ir-block.38):
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: [[ADD8_:%[0-9]+]]:g8rc = ADD8 [[PHI4]], [[RLDICL2]]
- ; CHECK: [[COPY11:%[0-9]+]]:gprc = COPY [[ADD8_]].sub_32
- ; CHECK: [[COPY12:%[0-9]+]]:gprc = COPY [[PHI4]].sub_32
- ; CHECK: [[RLWINM3:%[0-9]+]]:gprc_and_gprc_nor0 = RLWINM [[COPY12]], 1, 0, 30
- ; CHECK: [[ISEL1:%[0-9]+]]:gprc = ISEL [[RLWINM3]], [[COPY11]], [[CMPLWI2]].sub_eq
- ; CHECK: B %bb.12
- ; CHECK: bb.11 (%ir-block.56):
- ; CHECK: successors: %bb.12(0x80000000)
- ; CHECK: [[ADDI2:%[0-9]+]]:gprc = nuw nsw ADDI [[COPY10]], 100
- ; CHECK: B %bb.12
- ; CHECK: bb.12 (%ir-block.60):
- ; CHECK: successors: %bb.15(0x2aaaaaab), %bb.13(0x55555555)
- ; CHECK: [[PHI8:%[0-9]+]]:gprc = PHI [[ADDI2]], %bb.11, [[ISEL1]], %bb.10
- ; CHECK: [[ADDI8_4:%[0-9]+]]:g8rc_and_g8rc_nox0 = ADDI8 [[PHI7]], 8
- ; CHECK: [[LWZU:%[0-9]+]]:gprc, [[LWZU1:%[0-9]+]]:g8rc_and_g8rc_nox0 = LWZU 8, [[PHI6]] :: (load (s32) from %ir.46, !tbaa !2)
- ; CHECK: [[ADD4_2:%[0-9]+]]:gprc = nsw ADD4 [[LWZU]], [[PHI5]]
- ; CHECK: [[ADD4_3:%[0-9]+]]:gprc = nsw ADD4 [[PHI8]], [[ADD4_2]]
- ; CHECK: STW killed [[ADD4_3]], 0, [[ADDI8_4]] :: (store (s32) into %ir.44, !tbaa !2)
- ; CHECK: BCC 76, [[CMPLWI2]], %bb.15
- ; CHECK: B %bb.13
- ; CHECK: bb.13 (%ir-block.60):
- ; CHECK: successors: %bb.14(0x40000001), %bb.16(0x3fffffff)
- ; CHECK: BCC 68, [[CMPLWI1]], %bb.16
- ; CHECK: B %bb.14
- ; CHECK: bb.14 (%ir-block.67):
- ; CHECK: successors: %bb.17(0x80000000)
- ; CHECK: [[ADDI3:%[0-9]+]]:gprc = nuw nsw ADDI [[COPY10]], 101
- ; CHECK: B %bb.17
- ; CHECK: bb.15 (%ir-block.69):
- ; CHECK: successors: %bb.17(0x80000000)
- ; CHECK: [[ORI8_:%[0-9]+]]:g8rc = ORI8 [[PHI4]], 1
- ; CHECK: [[COPY13:%[0-9]+]]:gprc = COPY [[ORI8_]].sub_32
- ; CHECK: [[RLWINM4:%[0-9]+]]:gprc = RLWINM [[COPY13]], 1, 0, 30
- ; CHECK: B %bb.17
- ; CHECK: bb.16 (%ir-block.72):
- ; CHECK: successors: %bb.17(0x80000000)
- ; CHECK: [[ORI8_1:%[0-9]+]]:g8rc = ORI8 [[RLDICL2]], 1
- ; CHECK: [[ADD8_1:%[0-9]+]]:g8rc = ADD8 [[PHI4]], [[ORI8_1]]
- ; CHECK: [[COPY14:%[0-9]+]]:gprc = COPY [[ADD8_1]].sub_32
- ; CHECK: bb.17 (%ir-block.74):
- ; CHECK: successors: %bb.9(0x7c000000), %bb.3(0x04000000)
- ; CHECK: [[PHI9:%[0-9]+]]:gprc = PHI [[ADDI3]], %bb.14, [[RLWINM4]], %bb.15, [[COPY14]], %bb.16
- ; CHECK: [[COPY15:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY [[ADDI8_4]]
- ; CHECK: [[LWZ:%[0-9]+]]:gprc = LWZ 4, [[LWZU1]] :: (load (s32) from %ir.uglygep1112.cast, !tbaa !2)
- ; CHECK: [[ADD4_4:%[0-9]+]]:gprc = nsw ADD4 [[LWZ]], [[ADD4_2]]
- ; CHECK: [[ADD4_5:%[0-9]+]]:gprc = nsw ADD4 [[PHI9]], [[ADD4_4]]
- ; CHECK: STW killed [[ADD4_5]], 4, [[COPY15]] :: (store (s32) into %ir.uglygep78.cast, !tbaa !2)
- ; CHECK: [[ADDI8_5:%[0-9]+]]:g8rc = nuw nsw ADDI8 [[PHI4]], 2
- ; CHECK: BDNZ8 %bb.9, implicit-def dead $ctr8, implicit $ctr8
- ; CHECK: B %bb.3
+ ; CHECK-NEXT: successors: %bb.1(0x50000000), %bb.8(0x30000000)
+ ; CHECK-NEXT: liveins: $x3, $x5, $x6, $x7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY:%[0-9]+]]:g8rc = COPY $x7
+ ; CHECK-NEXT: [[COPY1:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY $x6
+ ; CHECK-NEXT: [[COPY2:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY $x5
+ ; CHECK-NEXT: [[COPY3:%[0-9]+]]:g8rc = COPY $x3
+ ; CHECK-NEXT: [[COPY4:%[0-9]+]]:gprc = COPY [[COPY]].sub_32
+ ; CHECK-NEXT: [[CMPWI:%[0-9]+]]:crrc = CMPWI [[COPY4]], 1
+ ; CHECK-NEXT: BCC 12, killed [[CMPWI]], %bb.8
+ ; CHECK-NEXT: B %bb.1
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.1 (%ir-block.7):
+ ; CHECK-NEXT: successors: %bb.18(0x40000000), %bb.2(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY5:%[0-9]+]]:gprc = COPY [[COPY3]].sub_32
+ ; CHECK-NEXT: [[DEF:%[0-9]+]]:g8rc = IMPLICIT_DEF
+ ; CHECK-NEXT: [[INSERT_SUBREG:%[0-9]+]]:g8rc = INSERT_SUBREG [[DEF]], [[COPY4]], %subreg.sub_32
+ ; CHECK-NEXT: [[RLDICL:%[0-9]+]]:g8rc = RLDICL killed [[INSERT_SUBREG]], 0, 32
+ ; CHECK-NEXT: [[CMPLWI:%[0-9]+]]:crrc = CMPLWI [[COPY4]], 1
+ ; CHECK-NEXT: [[CMPLWI1:%[0-9]+]]:crrc = CMPLWI [[COPY5]], 3
+ ; CHECK-NEXT: [[LI:%[0-9]+]]:gprc = LI 0
+ ; CHECK-NEXT: [[LI1:%[0-9]+]]:gprc = LI 100
+ ; CHECK-NEXT: [[LI8_:%[0-9]+]]:g8rc = LI8 0
+ ; CHECK-NEXT: BCC 68, killed [[CMPLWI]], %bb.2
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.18:
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: B %bb.4
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.2 (%ir-block.10):
+ ; CHECK-NEXT: successors: %bb.9(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[RLWINM8_:%[0-9]+]]:g8rc_and_g8rc_nox0 = RLWINM8 [[RLDICL]], 0, 0, 30
+ ; CHECK-NEXT: [[ADDI8_:%[0-9]+]]:g8rc = ADDI8 [[COPY2]], -8
+ ; CHECK-NEXT: [[ADDI8_1:%[0-9]+]]:g8rc = ADDI8 [[COPY1]], -8
+ ; CHECK-NEXT: [[ADDI8_2:%[0-9]+]]:g8rc = nsw ADDI8 killed [[RLWINM8_]], -2
+ ; CHECK-NEXT: [[RLDICL1:%[0-9]+]]:g8rc_and_g8rc_nox0 = RLDICL [[ADDI8_2]], 63, 1
+ ; CHECK-NEXT: [[ADDI8_3:%[0-9]+]]:g8rc = nuw ADDI8 killed [[RLDICL1]], 1
+ ; CHECK-NEXT: MTCTR8loop killed [[ADDI8_3]], implicit-def dead $ctr8
+ ; CHECK-NEXT: [[LI2:%[0-9]+]]:gprc = LI 0
+ ; CHECK-NEXT: [[LI8_1:%[0-9]+]]:g8rc = LI8 0
+ ; CHECK-NEXT: [[LIS:%[0-9]+]]:gprc = LIS 34952
+ ; CHECK-NEXT: [[ORI:%[0-9]+]]:gprc = ORI [[LIS]], 34953
+ ; CHECK-NEXT: [[DEF1:%[0-9]+]]:g8rc = IMPLICIT_DEF
+ ; CHECK-NEXT: [[CMPLWI2:%[0-9]+]]:crrc = CMPLWI [[COPY5]], 1
+ ; CHECK-NEXT: B %bb.9
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.3 (%ir-block.15):
+ ; CHECK-NEXT: successors: %bb.4(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[COPY6:%[0-9]+]]:gprc_and_gprc_nor0 = COPY %32.sub_32
+ ; CHECK-NEXT: [[ADDI:%[0-9]+]]:gprc_and_gprc_nor0 = ADDI [[COPY6]], -2
+ ; CHECK-NEXT: [[ADDI1:%[0-9]+]]:gprc = nuw ADDI [[ADDI]], 102
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.4 (%ir-block.17):
+ ; CHECK-NEXT: successors: %bb.8(0x40000000), %bb.5(0x40000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI:%[0-9]+]]:g8rc = PHI [[LI8_]], %bb.18, %32, %bb.3
+ ; CHECK-NEXT: [[PHI1:%[0-9]+]]:gprc = PHI [[LI1]], %bb.18, [[ADDI1]], %bb.3
+ ; CHECK-NEXT: [[PHI2:%[0-9]+]]:gprc = PHI [[LI]], %bb.18, %27, %bb.3
+ ; CHECK-NEXT: [[ANDI8_rec:%[0-9]+]]:g8rc = ANDI8_rec [[RLDICL]], 1, implicit-def $cr0
+ ; CHECK-NEXT: [[COPY7:%[0-9]+]]:crbitrc = COPY $cr0gt
+ ; CHECK-NEXT: BCn killed [[COPY7]], %bb.8
+ ; CHECK-NEXT: B %bb.5
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.5 (%ir-block.23):
+ ; CHECK-NEXT: successors: %bb.7(0x2aaaaaab), %bb.6(0x55555555)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[RLDICR:%[0-9]+]]:g8rc = RLDICR [[PHI]], 2, 61
+ ; CHECK-NEXT: [[LWZX:%[0-9]+]]:gprc = LWZX [[COPY2]], [[RLDICR]] :: (load (s32) from %ir.24, !tbaa !2)
+ ; CHECK-NEXT: [[ADD4_:%[0-9]+]]:gprc = nsw ADD4 killed [[LWZX]], [[PHI2]]
+ ; CHECK-NEXT: BCC 76, [[CMPLWI1]], %bb.7
+ ; CHECK-NEXT: B %bb.6
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.6 (%ir-block.23):
+ ; CHECK-NEXT: successors: %bb.7(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[CMPLWI3:%[0-9]+]]:crrc = CMPLWI [[COPY5]], 1
+ ; CHECK-NEXT: [[COPY8:%[0-9]+]]:gprc = COPY [[PHI]].sub_32
+ ; CHECK-NEXT: [[LIS1:%[0-9]+]]:gprc = LIS 34952
+ ; CHECK-NEXT: [[ORI1:%[0-9]+]]:gprc = ORI killed [[LIS1]], 34953
+ ; CHECK-NEXT: [[MULHWU:%[0-9]+]]:gprc = MULHWU [[COPY8]], killed [[ORI1]]
+ ; CHECK-NEXT: [[RLWINM:%[0-9]+]]:gprc = RLWINM [[MULHWU]], 28, 4, 31
+ ; CHECK-NEXT: [[MULLI:%[0-9]+]]:gprc = MULLI killed [[RLWINM]], 30
+ ; CHECK-NEXT: [[SUBF:%[0-9]+]]:gprc = SUBF killed [[MULLI]], [[COPY8]]
+ ; CHECK-NEXT: [[COPY9:%[0-9]+]]:gprc = COPY [[PHI]].sub_32
+ ; CHECK-NEXT: [[RLWINM1:%[0-9]+]]:gprc_and_gprc_nor0 = RLWINM [[COPY9]], 1, 0, 30
+ ; CHECK-NEXT: [[ISEL:%[0-9]+]]:gprc = ISEL [[RLWINM1]], [[SUBF]], [[CMPLWI3]].sub_eq
+ ; CHECK-NEXT: B %bb.7
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.7 (%ir-block.33):
+ ; CHECK-NEXT: successors: %bb.8(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI3:%[0-9]+]]:gprc = PHI [[PHI1]], %bb.5, [[ISEL]], %bb.6
+ ; CHECK-NEXT: [[ADD4_1:%[0-9]+]]:gprc = nsw ADD4 [[PHI3]], [[ADD4_]]
+ ; CHECK-NEXT: STWX killed [[ADD4_1]], [[COPY1]], [[RLDICR]] :: (store (s32) into %ir.36, !tbaa !2)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.8 (%ir-block.37):
+ ; CHECK-NEXT: [[LI8_2:%[0-9]+]]:g8rc = LI8 0
+ ; CHECK-NEXT: $x3 = COPY [[LI8_2]]
+ ; CHECK-NEXT: BLR8 implicit $lr8, implicit $rm, implicit $x3
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.9 (%ir-block.38):
+ ; CHECK-NEXT: successors: %bb.11(0x2aaaaaab), %bb.10(0x55555555)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI4:%[0-9]+]]:g8rc_and_g8rc_nox0 = PHI [[LI8_1]], %bb.2, %32, %bb.17
+ ; CHECK-NEXT: [[PHI5:%[0-9]+]]:gprc = PHI [[LI2]], %bb.2, %27, %bb.17
+ ; CHECK-NEXT: [[PHI6:%[0-9]+]]:g8rc_and_g8rc_nox0 = PHI [[ADDI8_]], %bb.2, %55, %bb.17
+ ; CHECK-NEXT: [[PHI7:%[0-9]+]]:g8rc_and_g8rc_nox0 = PHI [[ADDI8_1]], %bb.2, %15, %bb.17
+ ; CHECK-NEXT: [[COPY10:%[0-9]+]]:gprc_and_gprc_nor0 = COPY [[PHI4]].sub_32
+ ; CHECK-NEXT: [[MULHWU1:%[0-9]+]]:gprc = MULHWU [[COPY10]], [[ORI]]
+ ; CHECK-NEXT: [[RLWINM2:%[0-9]+]]:gprc = RLWINM [[MULHWU1]], 28, 4, 31
+ ; CHECK-NEXT: [[MULLI1:%[0-9]+]]:gprc = nsw MULLI killed [[RLWINM2]], -30
+ ; CHECK-NEXT: [[INSERT_SUBREG1:%[0-9]+]]:g8rc = INSERT_SUBREG [[DEF1]], killed [[MULLI1]], %subreg.sub_32
+ ; CHECK-NEXT: [[RLDICL2:%[0-9]+]]:g8rc = RLDICL killed [[INSERT_SUBREG1]], 0, 32
+ ; CHECK-NEXT: BCC 76, [[CMPLWI1]], %bb.11
+ ; CHECK-NEXT: B %bb.10
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.10 (%ir-block.38):
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ADD8_:%[0-9]+]]:g8rc = ADD8 [[PHI4]], [[RLDICL2]]
+ ; CHECK-NEXT: [[COPY11:%[0-9]+]]:gprc = COPY [[ADD8_]].sub_32
+ ; CHECK-NEXT: [[COPY12:%[0-9]+]]:gprc = COPY [[PHI4]].sub_32
+ ; CHECK-NEXT: [[RLWINM3:%[0-9]+]]:gprc_and_gprc_nor0 = RLWINM [[COPY12]], 1, 0, 30
+ ; CHECK-NEXT: [[ISEL1:%[0-9]+]]:gprc = ISEL [[RLWINM3]], [[COPY11]], [[CMPLWI2]].sub_eq
+ ; CHECK-NEXT: B %bb.12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.11 (%ir-block.56):
+ ; CHECK-NEXT: successors: %bb.12(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ADDI2:%[0-9]+]]:gprc = nuw nsw ADDI [[COPY10]], 100
+ ; CHECK-NEXT: B %bb.12
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.12 (%ir-block.60):
+ ; CHECK-NEXT: successors: %bb.15(0x2aaaaaab), %bb.13(0x55555555)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI8:%[0-9]+]]:gprc = PHI [[ADDI2]], %bb.11, [[ISEL1]], %bb.10
+ ; CHECK-NEXT: [[ADDI8_4:%[0-9]+]]:g8rc_and_g8rc_nox0 = ADDI8 [[PHI7]], 8
+ ; CHECK-NEXT: [[LWZU:%[0-9]+]]:gprc, [[LWZU1:%[0-9]+]]:g8rc_and_g8rc_nox0 = LWZU 8, [[PHI6]] :: (load (s32) from %ir.46, !tbaa !2)
+ ; CHECK-NEXT: [[ADD4_2:%[0-9]+]]:gprc = nsw ADD4 [[LWZU]], [[PHI5]]
+ ; CHECK-NEXT: [[ADD4_3:%[0-9]+]]:gprc = nsw ADD4 [[PHI8]], [[ADD4_2]]
+ ; CHECK-NEXT: STW killed [[ADD4_3]], 0, [[ADDI8_4]] :: (store (s32) into %ir.44, !tbaa !2)
+ ; CHECK-NEXT: BCC 76, [[CMPLWI2]], %bb.15
+ ; CHECK-NEXT: B %bb.13
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.13 (%ir-block.60):
+ ; CHECK-NEXT: successors: %bb.14(0x40000001), %bb.16(0x3fffffff)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: BCC 68, [[CMPLWI1]], %bb.16
+ ; CHECK-NEXT: B %bb.14
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.14 (%ir-block.67):
+ ; CHECK-NEXT: successors: %bb.17(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ADDI3:%[0-9]+]]:gprc = nuw nsw ADDI [[COPY10]], 101
+ ; CHECK-NEXT: B %bb.17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.15 (%ir-block.69):
+ ; CHECK-NEXT: successors: %bb.17(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ORI8_:%[0-9]+]]:g8rc = ORI8 [[PHI4]], 1
+ ; CHECK-NEXT: [[COPY13:%[0-9]+]]:gprc = COPY [[ORI8_]].sub_32
+ ; CHECK-NEXT: [[RLWINM4:%[0-9]+]]:gprc = RLWINM [[COPY13]], 1, 0, 30
+ ; CHECK-NEXT: B %bb.17
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.16 (%ir-block.72):
+ ; CHECK-NEXT: successors: %bb.17(0x80000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[ORI8_1:%[0-9]+]]:g8rc = ORI8 [[RLDICL2]], 1
+ ; CHECK-NEXT: [[ADD8_1:%[0-9]+]]:g8rc = ADD8 [[PHI4]], [[ORI8_1]]
+ ; CHECK-NEXT: [[COPY14:%[0-9]+]]:gprc = COPY [[ADD8_1]].sub_32
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: bb.17 (%ir-block.74):
+ ; CHECK-NEXT: successors: %bb.9(0x7c000000), %bb.3(0x04000000)
+ ; CHECK-NEXT: {{ $}}
+ ; CHECK-NEXT: [[PHI9:%[0-9]+]]:gprc = PHI [[ADDI3]], %bb.14, [[RLWINM4]], %bb.15, [[COPY14]], %bb.16
+ ; CHECK-NEXT: [[COPY15:%[0-9]+]]:g8rc_and_g8rc_nox0 = COPY [[ADDI8_4]]
+ ; CHECK-NEXT: [[LWZ:%[0-9]+]]:gprc = LWZ 4, [[LWZU1]] :: (load (s32) from %ir.uglygep1112.cast, !tbaa !2)
+ ; CHECK-NEXT: [[ADD4_4:%[0-9]+]]:gprc = nsw ADD4 [[LWZ]], [[ADD4_2]]
+ ; CHECK-NEXT: [[ADD4_5:%[0-9]+]]:gprc = nsw ADD4 [[PHI9]], [[ADD4_4]]
+ ; CHECK-NEXT: STW killed [[ADD4_5]], 4, [[COPY15]] :: (store (s32) into %ir.uglygep78.cast, !tbaa !2)
+ ; CHECK-NEXT: [[ADDI8_5:%[0-9]+]]:g8rc = nuw nsw ADDI8 [[PHI4]], 2
+ ; CHECK-NEXT: BDNZ8 %bb.9, implicit-def dead $ctr8, implicit $ctr8
+ ; CHECK-NEXT: B %bb.3
bb.0 (%ir-block.5):
successors: %bb.1(0x50000000), %bb.9(0x30000000)
liveins: $x3, $x5, $x6, $x7
diff --git a/llvm/test/CodeGen/RISCV/combine-storetomstore.ll b/llvm/test/CodeGen/RISCV/combine-storetomstore.ll
index c7d1f76..65c3eb64 100644
--- a/llvm/test/CodeGen/RISCV/combine-storetomstore.ll
+++ b/llvm/test/CodeGen/RISCV/combine-storetomstore.ll
@@ -73,37 +73,33 @@ define void @test_masked_store_success_v4f16(<4 x half> %x, ptr %ptr, <4 x i1> %
; RISCV-NEXT: vmerge.vim v11, v10, 1, v0
; RISCV-NEXT: vslidedown.vi v11, v11, 1
; RISCV-NEXT: vmv.x.s a3, v11
-; RISCV-NEXT: andi a3, a3, 1
-; RISCV-NEXT: bnez a3, .LBB4_4
+; RISCV-NEXT: andi a4, a3, 1
+; RISCV-NEXT: addi a3, a0, 24
+; RISCV-NEXT: bnez a4, .LBB4_4
; RISCV-NEXT: # %bb.3:
; RISCV-NEXT: addi a3, a1, 6
-; RISCV-NEXT: j .LBB4_5
; RISCV-NEXT: .LBB4_4:
-; RISCV-NEXT: addi a3, a0, 24
-; RISCV-NEXT: .LBB4_5:
; RISCV-NEXT: vmv1r.v v0, v9
; RISCV-NEXT: vmerge.vim v9, v10, 1, v0
; RISCV-NEXT: vslidedown.vi v9, v9, 1
; RISCV-NEXT: vmv.x.s a4, v9
; RISCV-NEXT: andi a4, a4, 1
-; RISCV-NEXT: bnez a4, .LBB4_7
-; RISCV-NEXT: # %bb.6:
-; RISCV-NEXT: addi a5, a1, 2
-; RISCV-NEXT: j .LBB4_8
-; RISCV-NEXT: .LBB4_7:
; RISCV-NEXT: addi a5, a0, 8
-; RISCV-NEXT: .LBB4_8:
+; RISCV-NEXT: bnez a4, .LBB4_6
+; RISCV-NEXT: # %bb.5:
+; RISCV-NEXT: addi a5, a1, 2
+; RISCV-NEXT: .LBB4_6:
; RISCV-NEXT: lh a4, 0(a2)
; RISCV-NEXT: lh a2, 0(a3)
; RISCV-NEXT: lh a3, 0(a5)
; RISCV-NEXT: vfirst.m a5, v8
-; RISCV-NEXT: beqz a5, .LBB4_10
-; RISCV-NEXT: # %bb.9:
+; RISCV-NEXT: beqz a5, .LBB4_8
+; RISCV-NEXT: # %bb.7:
; RISCV-NEXT: addi a0, a1, 4
-; RISCV-NEXT: j .LBB4_11
-; RISCV-NEXT: .LBB4_10:
+; RISCV-NEXT: j .LBB4_9
+; RISCV-NEXT: .LBB4_8:
; RISCV-NEXT: addi a0, a0, 16
-; RISCV-NEXT: .LBB4_11:
+; RISCV-NEXT: .LBB4_9:
; RISCV-NEXT: lh a0, 0(a0)
; RISCV-NEXT: sh a4, 0(a1)
; RISCV-NEXT: sh a3, 2(a1)
@@ -219,14 +215,12 @@ define void @test_masked_store_success_v8f16(<8 x half> %x, ptr %ptr, <8 x i1> %
; RISCV-NEXT: vmerge.vim v13, v12, 1, v0
; RISCV-NEXT: vslidedown.vi v13, v13, 1
; RISCV-NEXT: vmv.x.s a3, v13
-; RISCV-NEXT: andi a3, a3, 1
-; RISCV-NEXT: bnez a3, .LBB11_4
+; RISCV-NEXT: andi a4, a3, 1
+; RISCV-NEXT: addi a3, a0, 56
+; RISCV-NEXT: bnez a4, .LBB11_4
; RISCV-NEXT: # %bb.3:
; RISCV-NEXT: addi a3, a1, 14
-; RISCV-NEXT: j .LBB11_5
; RISCV-NEXT: .LBB11_4:
-; RISCV-NEXT: addi a3, a0, 56
-; RISCV-NEXT: .LBB11_5:
; RISCV-NEXT: vmv1r.v v0, v8
; RISCV-NEXT: vsetivli zero, 4, e8, mf4, ta, ma
; RISCV-NEXT: vmerge.vim v10, v10, 1, v0
@@ -238,50 +232,40 @@ define void @test_masked_store_success_v8f16(<8 x half> %x, ptr %ptr, <8 x i1> %
; RISCV-NEXT: vmerge.vim v13, v12, 1, v0
; RISCV-NEXT: vslidedown.vi v13, v13, 1
; RISCV-NEXT: vmv.x.s a4, v13
-; RISCV-NEXT: andi a4, a4, 1
-; RISCV-NEXT: bnez a4, .LBB11_8
-; RISCV-NEXT: # %bb.6:
+; RISCV-NEXT: andi a5, a4, 1
+; RISCV-NEXT: addi a4, a0, 24
+; RISCV-NEXT: bnez a5, .LBB11_6
+; RISCV-NEXT: # %bb.5:
; RISCV-NEXT: addi a4, a1, 6
-; RISCV-NEXT: vfirst.m a5, v11
-; RISCV-NEXT: bnez a5, .LBB11_9
-; RISCV-NEXT: .LBB11_7:
+; RISCV-NEXT: .LBB11_6:
+; RISCV-NEXT: vfirst.m a6, v11
; RISCV-NEXT: addi a5, a0, 32
-; RISCV-NEXT: j .LBB11_10
-; RISCV-NEXT: .LBB11_8:
-; RISCV-NEXT: addi a4, a0, 24
-; RISCV-NEXT: vfirst.m a5, v11
-; RISCV-NEXT: beqz a5, .LBB11_7
-; RISCV-NEXT: .LBB11_9:
+; RISCV-NEXT: beqz a6, .LBB11_8
+; RISCV-NEXT: # %bb.7:
; RISCV-NEXT: addi a5, a1, 8
-; RISCV-NEXT: .LBB11_10:
+; RISCV-NEXT: .LBB11_8:
; RISCV-NEXT: vmv1r.v v0, v11
; RISCV-NEXT: vmerge.vim v11, v12, 1, v0
; RISCV-NEXT: vslidedown.vi v11, v11, 1
; RISCV-NEXT: vmv.x.s a6, v11
-; RISCV-NEXT: andi a6, a6, 1
-; RISCV-NEXT: bnez a6, .LBB11_14
-; RISCV-NEXT: # %bb.11:
+; RISCV-NEXT: andi a7, a6, 1
+; RISCV-NEXT: addi a6, a0, 40
+; RISCV-NEXT: bnez a7, .LBB11_10
+; RISCV-NEXT: # %bb.9:
; RISCV-NEXT: addi a6, a1, 10
-; RISCV-NEXT: vfirst.m a7, v9
-; RISCV-NEXT: bnez a7, .LBB11_15
-; RISCV-NEXT: .LBB11_12:
+; RISCV-NEXT: .LBB11_10:
+; RISCV-NEXT: vfirst.m t0, v9
; RISCV-NEXT: addi a7, a0, 48
-; RISCV-NEXT: vfirst.m t0, v10
-; RISCV-NEXT: bnez t0, .LBB11_16
-; RISCV-NEXT: .LBB11_13:
-; RISCV-NEXT: addi t1, a0, 16
-; RISCV-NEXT: j .LBB11_17
-; RISCV-NEXT: .LBB11_14:
-; RISCV-NEXT: addi a6, a0, 40
-; RISCV-NEXT: vfirst.m a7, v9
-; RISCV-NEXT: beqz a7, .LBB11_12
-; RISCV-NEXT: .LBB11_15:
+; RISCV-NEXT: beqz t0, .LBB11_12
+; RISCV-NEXT: # %bb.11:
; RISCV-NEXT: addi a7, a1, 12
+; RISCV-NEXT: .LBB11_12:
; RISCV-NEXT: vfirst.m t0, v10
-; RISCV-NEXT: beqz t0, .LBB11_13
-; RISCV-NEXT: .LBB11_16:
+; RISCV-NEXT: addi t1, a0, 16
+; RISCV-NEXT: beqz t0, .LBB11_14
+; RISCV-NEXT: # %bb.13:
; RISCV-NEXT: addi t1, a1, 4
-; RISCV-NEXT: .LBB11_17:
+; RISCV-NEXT: .LBB11_14:
; RISCV-NEXT: vmv1r.v v0, v8
; RISCV-NEXT: lh t0, 0(a2)
; RISCV-NEXT: lh a2, 0(a3)
@@ -294,13 +278,13 @@ define void @test_masked_store_success_v8f16(<8 x half> %x, ptr %ptr, <8 x i1> %
; RISCV-NEXT: vslidedown.vi v8, v8, 1
; RISCV-NEXT: vmv.x.s t1, v8
; RISCV-NEXT: andi t1, t1, 1
-; RISCV-NEXT: bnez t1, .LBB11_19
-; RISCV-NEXT: # %bb.18:
+; RISCV-NEXT: bnez t1, .LBB11_16
+; RISCV-NEXT: # %bb.15:
; RISCV-NEXT: addi a0, a1, 2
-; RISCV-NEXT: j .LBB11_20
-; RISCV-NEXT: .LBB11_19:
+; RISCV-NEXT: j .LBB11_17
+; RISCV-NEXT: .LBB11_16:
; RISCV-NEXT: addi a0, a0, 8
-; RISCV-NEXT: .LBB11_20:
+; RISCV-NEXT: .LBB11_17:
; RISCV-NEXT: lh a0, 0(a0)
; RISCV-NEXT: sh t0, 0(a1)
; RISCV-NEXT: sh a0, 2(a1)
diff --git a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
index 6136c32..252ecd3 100644
--- a/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
+++ b/llvm/test/CodeGen/RISCV/riscv-codegenprepare-asm.ll
@@ -48,38 +48,35 @@ for.body: ; preds = %for.body.preheader,
define void @test2(ptr nocapture noundef %a, i32 noundef signext %n) {
; CHECK-LABEL: test2:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: blez a1, .LBB1_7
+; CHECK-NEXT: blez a1, .LBB1_6
; CHECK-NEXT: # %bb.1: # %for.body.preheader
-; CHECK-NEXT: li a3, 1
-; CHECK-NEXT: andi a2, a1, 1
-; CHECK-NEXT: bne a1, a3, .LBB1_3
-; CHECK-NEXT: # %bb.2:
-; CHECK-NEXT: li a3, 0
-; CHECK-NEXT: j .LBB1_5
-; CHECK-NEXT: .LBB1_3: # %for.body.preheader.new
-; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: li a4, 1
+; CHECK-NEXT: andi a3, a1, 1
+; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: beq a1, a4, .LBB1_4
+; CHECK-NEXT: # %bb.2: # %for.body.preheader.new
; CHECK-NEXT: andi a1, a1, -2
; CHECK-NEXT: addi a4, a0, 4
-; CHECK-NEXT: .LBB1_4: # %for.body
+; CHECK-NEXT: .LBB1_3: # %for.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: lw a5, -4(a4)
; CHECK-NEXT: lw a6, 0(a4)
-; CHECK-NEXT: addi a3, a3, 2
+; CHECK-NEXT: addi a2, a2, 2
; CHECK-NEXT: addi a5, a5, 4
; CHECK-NEXT: addi a6, a6, 4
; CHECK-NEXT: sw a5, -4(a4)
; CHECK-NEXT: sw a6, 0(a4)
; CHECK-NEXT: addi a4, a4, 8
-; CHECK-NEXT: bne a1, a3, .LBB1_4
-; CHECK-NEXT: .LBB1_5: # %for.cond.cleanup.loopexit.unr-lcssa
-; CHECK-NEXT: beqz a2, .LBB1_7
-; CHECK-NEXT: # %bb.6: # %for.body.epil
-; CHECK-NEXT: slli a3, a3, 2
-; CHECK-NEXT: add a0, a0, a3
+; CHECK-NEXT: bne a1, a2, .LBB1_3
+; CHECK-NEXT: .LBB1_4: # %for.cond.cleanup.loopexit.unr-lcssa
+; CHECK-NEXT: beqz a3, .LBB1_6
+; CHECK-NEXT: # %bb.5: # %for.body.epil
+; CHECK-NEXT: slli a2, a2, 2
+; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: lw a1, 0(a0)
; CHECK-NEXT: addi a1, a1, 4
; CHECK-NEXT: sw a1, 0(a0)
-; CHECK-NEXT: .LBB1_7: # %for.cond.cleanup
+; CHECK-NEXT: .LBB1_6: # %for.cond.cleanup
; CHECK-NEXT: ret
entry:
%cmp3 = icmp sgt i32 %n, 0
diff --git a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
index 31fa5d0..a3374c5 100644
--- a/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/dont-sink-splat-operands.ll
@@ -88,8 +88,8 @@ define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
; NO-SINK: # %bb.0: # %entry
; NO-SINK-NEXT: csrr a5, vlenb
; NO-SINK-NEXT: srli a3, a5, 1
-; NO-SINK-NEXT: li a2, 1024
-; NO-SINK-NEXT: bgeu a2, a3, .LBB1_2
+; NO-SINK-NEXT: li a4, 1024
+; NO-SINK-NEXT: bgeu a4, a3, .LBB1_2
; NO-SINK-NEXT: # %bb.1:
; NO-SINK-NEXT: li a2, 0
; NO-SINK-NEXT: j .LBB1_5
@@ -131,8 +131,8 @@ define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
; SINK: # %bb.0: # %entry
; SINK-NEXT: csrr a5, vlenb
; SINK-NEXT: srli a3, a5, 1
-; SINK-NEXT: li a2, 1024
-; SINK-NEXT: bgeu a2, a3, .LBB1_2
+; SINK-NEXT: li a4, 1024
+; SINK-NEXT: bgeu a4, a3, .LBB1_2
; SINK-NEXT: # %bb.1:
; SINK-NEXT: li a2, 0
; SINK-NEXT: j .LBB1_5
@@ -173,8 +173,8 @@ define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
; DEFAULT: # %bb.0: # %entry
; DEFAULT-NEXT: csrr a5, vlenb
; DEFAULT-NEXT: srli a3, a5, 1
-; DEFAULT-NEXT: li a2, 1024
-; DEFAULT-NEXT: bgeu a2, a3, .LBB1_2
+; DEFAULT-NEXT: li a4, 1024
+; DEFAULT-NEXT: bgeu a4, a3, .LBB1_2
; DEFAULT-NEXT: # %bb.1:
; DEFAULT-NEXT: li a2, 0
; DEFAULT-NEXT: j .LBB1_5
@@ -406,33 +406,33 @@ for.cond.cleanup: ; preds = %vector.body
define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
; NO-SINK-LABEL: sink_splat_fadd_scalable:
; NO-SINK: # %bb.0: # %entry
-; NO-SINK-NEXT: csrr a1, vlenb
-; NO-SINK-NEXT: srli a3, a1, 2
-; NO-SINK-NEXT: li a2, 1024
-; NO-SINK-NEXT: bgeu a2, a3, .LBB4_2
+; NO-SINK-NEXT: csrr a2, vlenb
+; NO-SINK-NEXT: srli a3, a2, 2
+; NO-SINK-NEXT: li a4, 1024
+; NO-SINK-NEXT: bgeu a4, a3, .LBB4_2
; NO-SINK-NEXT: # %bb.1:
-; NO-SINK-NEXT: li a2, 0
+; NO-SINK-NEXT: li a1, 0
; NO-SINK-NEXT: j .LBB4_5
; NO-SINK-NEXT: .LBB4_2: # %vector.ph
-; NO-SINK-NEXT: addi a2, a3, -1
-; NO-SINK-NEXT: andi a4, a2, 1024
-; NO-SINK-NEXT: xori a2, a4, 1024
+; NO-SINK-NEXT: addi a1, a3, -1
+; NO-SINK-NEXT: andi a4, a1, 1024
+; NO-SINK-NEXT: xori a1, a4, 1024
; NO-SINK-NEXT: vsetvli a5, zero, e32, m1, ta, ma
; NO-SINK-NEXT: vfmv.v.f v8, fa0
; NO-SINK-NEXT: mv a5, a0
-; NO-SINK-NEXT: mv a6, a2
+; NO-SINK-NEXT: mv a6, a1
; NO-SINK-NEXT: .LBB4_3: # %vector.body
; NO-SINK-NEXT: # =>This Inner Loop Header: Depth=1
; NO-SINK-NEXT: vl1re32.v v9, (a5)
; NO-SINK-NEXT: sub a6, a6, a3
; NO-SINK-NEXT: vfadd.vv v9, v9, v8
; NO-SINK-NEXT: vs1r.v v9, (a5)
-; NO-SINK-NEXT: add a5, a5, a1
+; NO-SINK-NEXT: add a5, a5, a2
; NO-SINK-NEXT: bnez a6, .LBB4_3
; NO-SINK-NEXT: # %bb.4: # %middle.block
; NO-SINK-NEXT: beqz a4, .LBB4_7
; NO-SINK-NEXT: .LBB4_5: # %for.body.preheader
-; NO-SINK-NEXT: slli a1, a2, 2
+; NO-SINK-NEXT: slli a1, a1, 2
; NO-SINK-NEXT: lui a2, 1
; NO-SINK-NEXT: add a1, a0, a1
; NO-SINK-NEXT: add a0, a0, a2
@@ -448,19 +448,19 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
;
; SINK-LABEL: sink_splat_fadd_scalable:
; SINK: # %bb.0: # %entry
-; SINK-NEXT: csrr a1, vlenb
-; SINK-NEXT: srli a3, a1, 2
-; SINK-NEXT: li a2, 1024
-; SINK-NEXT: bgeu a2, a3, .LBB4_2
+; SINK-NEXT: csrr a2, vlenb
+; SINK-NEXT: srli a3, a2, 2
+; SINK-NEXT: li a4, 1024
+; SINK-NEXT: bgeu a4, a3, .LBB4_2
; SINK-NEXT: # %bb.1:
-; SINK-NEXT: li a2, 0
+; SINK-NEXT: li a1, 0
; SINK-NEXT: j .LBB4_5
; SINK-NEXT: .LBB4_2: # %vector.ph
-; SINK-NEXT: addi a2, a3, -1
-; SINK-NEXT: andi a4, a2, 1024
-; SINK-NEXT: xori a2, a4, 1024
+; SINK-NEXT: addi a1, a3, -1
+; SINK-NEXT: andi a4, a1, 1024
+; SINK-NEXT: xori a1, a4, 1024
; SINK-NEXT: mv a5, a0
-; SINK-NEXT: mv a6, a2
+; SINK-NEXT: mv a6, a1
; SINK-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; SINK-NEXT: .LBB4_3: # %vector.body
; SINK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -468,12 +468,12 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
; SINK-NEXT: sub a6, a6, a3
; SINK-NEXT: vfadd.vf v8, v8, fa0
; SINK-NEXT: vs1r.v v8, (a5)
-; SINK-NEXT: add a5, a5, a1
+; SINK-NEXT: add a5, a5, a2
; SINK-NEXT: bnez a6, .LBB4_3
; SINK-NEXT: # %bb.4: # %middle.block
; SINK-NEXT: beqz a4, .LBB4_7
; SINK-NEXT: .LBB4_5: # %for.body.preheader
-; SINK-NEXT: slli a1, a2, 2
+; SINK-NEXT: slli a1, a1, 2
; SINK-NEXT: lui a2, 1
; SINK-NEXT: add a1, a0, a1
; SINK-NEXT: add a0, a0, a2
@@ -489,19 +489,19 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
;
; DEFAULT-LABEL: sink_splat_fadd_scalable:
; DEFAULT: # %bb.0: # %entry
-; DEFAULT-NEXT: csrr a1, vlenb
-; DEFAULT-NEXT: srli a3, a1, 2
-; DEFAULT-NEXT: li a2, 1024
-; DEFAULT-NEXT: bgeu a2, a3, .LBB4_2
+; DEFAULT-NEXT: csrr a2, vlenb
+; DEFAULT-NEXT: srli a3, a2, 2
+; DEFAULT-NEXT: li a4, 1024
+; DEFAULT-NEXT: bgeu a4, a3, .LBB4_2
; DEFAULT-NEXT: # %bb.1:
-; DEFAULT-NEXT: li a2, 0
+; DEFAULT-NEXT: li a1, 0
; DEFAULT-NEXT: j .LBB4_5
; DEFAULT-NEXT: .LBB4_2: # %vector.ph
-; DEFAULT-NEXT: addi a2, a3, -1
-; DEFAULT-NEXT: andi a4, a2, 1024
-; DEFAULT-NEXT: xori a2, a4, 1024
+; DEFAULT-NEXT: addi a1, a3, -1
+; DEFAULT-NEXT: andi a4, a1, 1024
+; DEFAULT-NEXT: xori a1, a4, 1024
; DEFAULT-NEXT: mv a5, a0
-; DEFAULT-NEXT: mv a6, a2
+; DEFAULT-NEXT: mv a6, a1
; DEFAULT-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; DEFAULT-NEXT: .LBB4_3: # %vector.body
; DEFAULT-NEXT: # =>This Inner Loop Header: Depth=1
@@ -509,12 +509,12 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
; DEFAULT-NEXT: sub a6, a6, a3
; DEFAULT-NEXT: vfadd.vf v8, v8, fa0
; DEFAULT-NEXT: vs1r.v v8, (a5)
-; DEFAULT-NEXT: add a5, a5, a1
+; DEFAULT-NEXT: add a5, a5, a2
; DEFAULT-NEXT: bnez a6, .LBB4_3
; DEFAULT-NEXT: # %bb.4: # %middle.block
; DEFAULT-NEXT: beqz a4, .LBB4_7
; DEFAULT-NEXT: .LBB4_5: # %for.body.preheader
-; DEFAULT-NEXT: slli a1, a2, 2
+; DEFAULT-NEXT: slli a1, a1, 2
; DEFAULT-NEXT: lui a2, 1
; DEFAULT-NEXT: add a1, a0, a1
; DEFAULT-NEXT: add a0, a0, a2
diff --git a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
index 02825b2..287050c 100644
--- a/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/sink-splat-operands.ll
@@ -245,8 +245,8 @@ define void @sink_splat_mul_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB7_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB7_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB7_5
@@ -336,8 +336,8 @@ define void @sink_splat_add_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB8_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB8_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB8_5
@@ -427,8 +427,8 @@ define void @sink_splat_sub_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB9_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB9_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB9_5
@@ -518,8 +518,8 @@ define void @sink_splat_rsub_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB10_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB10_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB10_5
@@ -609,8 +609,8 @@ define void @sink_splat_and_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB11_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB11_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB11_5
@@ -700,8 +700,8 @@ define void @sink_splat_or_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB12_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB12_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB12_5
@@ -791,8 +791,8 @@ define void @sink_splat_xor_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB13_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB13_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB13_5
@@ -984,8 +984,8 @@ define void @sink_splat_shl_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB17_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB17_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB17_5
@@ -1075,8 +1075,8 @@ define void @sink_splat_lshr_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB18_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB18_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB18_5
@@ -1166,8 +1166,8 @@ define void @sink_splat_ashr_scalable(ptr nocapture %a) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a4, vlenb
; CHECK-NEXT: srli a2, a4, 1
-; CHECK-NEXT: li a1, 1024
-; CHECK-NEXT: bgeu a1, a2, .LBB19_2
+; CHECK-NEXT: li a3, 1024
+; CHECK-NEXT: bgeu a3, a2, .LBB19_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 0
; CHECK-NEXT: j .LBB19_5
@@ -1457,19 +1457,19 @@ for.cond.cleanup: ; preds = %vector.body
define void @sink_splat_fmul_scalable(ptr nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fmul_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 2
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB26_2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a3, a2, 2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB26_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: li a1, 0
; CHECK-NEXT: j .LBB26_5
; CHECK-NEXT: .LBB26_2: # %vector.ph
-; CHECK-NEXT: addi a2, a3, -1
-; CHECK-NEXT: andi a4, a2, 1024
-; CHECK-NEXT: xori a2, a4, 1024
+; CHECK-NEXT: addi a1, a3, -1
+; CHECK-NEXT: andi a4, a1, 1024
+; CHECK-NEXT: xori a1, a4, 1024
; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB26_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1477,12 +1477,12 @@ define void @sink_splat_fmul_scalable(ptr nocapture %a, float %x) {
; CHECK-NEXT: sub a6, a6, a3
; CHECK-NEXT: vfmul.vf v8, v8, fa0
; CHECK-NEXT: vs1r.v v8, (a5)
-; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: add a5, a5, a2
; CHECK-NEXT: bnez a6, .LBB26_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a4, .LBB26_7
; CHECK-NEXT: .LBB26_5: # %for.body.preheader
-; CHECK-NEXT: slli a1, a2, 2
+; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: add a0, a0, a2
@@ -1547,19 +1547,19 @@ for.body: ; preds = %for.body.preheader,
define void @sink_splat_fdiv_scalable(ptr nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fdiv_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 2
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB27_2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a3, a2, 2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB27_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: li a1, 0
; CHECK-NEXT: j .LBB27_5
; CHECK-NEXT: .LBB27_2: # %vector.ph
-; CHECK-NEXT: addi a2, a3, -1
-; CHECK-NEXT: andi a4, a2, 1024
-; CHECK-NEXT: xori a2, a4, 1024
+; CHECK-NEXT: addi a1, a3, -1
+; CHECK-NEXT: andi a4, a1, 1024
+; CHECK-NEXT: xori a1, a4, 1024
; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB27_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1567,12 +1567,12 @@ define void @sink_splat_fdiv_scalable(ptr nocapture %a, float %x) {
; CHECK-NEXT: vfdiv.vf v8, v8, fa0
; CHECK-NEXT: sub a6, a6, a3
; CHECK-NEXT: vs1r.v v8, (a5)
-; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: add a5, a5, a2
; CHECK-NEXT: bnez a6, .LBB27_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a4, .LBB27_7
; CHECK-NEXT: .LBB27_5: # %for.body.preheader
-; CHECK-NEXT: slli a1, a2, 2
+; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: add a0, a0, a2
@@ -1637,19 +1637,19 @@ for.body: ; preds = %for.body.preheader,
define void @sink_splat_frdiv_scalable(ptr nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_frdiv_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 2
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB28_2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a3, a2, 2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB28_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: li a1, 0
; CHECK-NEXT: j .LBB28_5
; CHECK-NEXT: .LBB28_2: # %vector.ph
-; CHECK-NEXT: addi a2, a3, -1
-; CHECK-NEXT: andi a4, a2, 1024
-; CHECK-NEXT: xori a2, a4, 1024
+; CHECK-NEXT: addi a1, a3, -1
+; CHECK-NEXT: andi a4, a1, 1024
+; CHECK-NEXT: xori a1, a4, 1024
; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB28_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1657,12 +1657,12 @@ define void @sink_splat_frdiv_scalable(ptr nocapture %a, float %x) {
; CHECK-NEXT: vfrdiv.vf v8, v8, fa0
; CHECK-NEXT: sub a6, a6, a3
; CHECK-NEXT: vs1r.v v8, (a5)
-; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: add a5, a5, a2
; CHECK-NEXT: bnez a6, .LBB28_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a4, .LBB28_7
; CHECK-NEXT: .LBB28_5: # %for.body.preheader
-; CHECK-NEXT: slli a1, a2, 2
+; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: add a0, a0, a2
@@ -1727,19 +1727,19 @@ for.body: ; preds = %for.body.preheader,
define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fadd_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 2
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB29_2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a3, a2, 2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB29_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: li a1, 0
; CHECK-NEXT: j .LBB29_5
; CHECK-NEXT: .LBB29_2: # %vector.ph
-; CHECK-NEXT: addi a2, a3, -1
-; CHECK-NEXT: andi a4, a2, 1024
-; CHECK-NEXT: xori a2, a4, 1024
+; CHECK-NEXT: addi a1, a3, -1
+; CHECK-NEXT: andi a4, a1, 1024
+; CHECK-NEXT: xori a1, a4, 1024
; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB29_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1747,12 +1747,12 @@ define void @sink_splat_fadd_scalable(ptr nocapture %a, float %x) {
; CHECK-NEXT: sub a6, a6, a3
; CHECK-NEXT: vfadd.vf v8, v8, fa0
; CHECK-NEXT: vs1r.v v8, (a5)
-; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: add a5, a5, a2
; CHECK-NEXT: bnez a6, .LBB29_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a4, .LBB29_7
; CHECK-NEXT: .LBB29_5: # %for.body.preheader
-; CHECK-NEXT: slli a1, a2, 2
+; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: add a0, a0, a2
@@ -1817,19 +1817,19 @@ for.body: ; preds = %for.body.preheader,
define void @sink_splat_fsub_scalable(ptr nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_fsub_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 2
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB30_2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a3, a2, 2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB30_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: li a1, 0
; CHECK-NEXT: j .LBB30_5
; CHECK-NEXT: .LBB30_2: # %vector.ph
-; CHECK-NEXT: addi a2, a3, -1
-; CHECK-NEXT: andi a4, a2, 1024
-; CHECK-NEXT: xori a2, a4, 1024
+; CHECK-NEXT: addi a1, a3, -1
+; CHECK-NEXT: andi a4, a1, 1024
+; CHECK-NEXT: xori a1, a4, 1024
; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB30_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1837,12 +1837,12 @@ define void @sink_splat_fsub_scalable(ptr nocapture %a, float %x) {
; CHECK-NEXT: sub a6, a6, a3
; CHECK-NEXT: vfsub.vf v8, v8, fa0
; CHECK-NEXT: vs1r.v v8, (a5)
-; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: add a5, a5, a2
; CHECK-NEXT: bnez a6, .LBB30_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a4, .LBB30_7
; CHECK-NEXT: .LBB30_5: # %for.body.preheader
-; CHECK-NEXT: slli a1, a2, 2
+; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: add a0, a0, a2
@@ -1907,19 +1907,19 @@ for.body: ; preds = %for.body.preheader,
define void @sink_splat_frsub_scalable(ptr nocapture %a, float %x) {
; CHECK-LABEL: sink_splat_frsub_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a1, vlenb
-; CHECK-NEXT: srli a3, a1, 2
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB31_2
+; CHECK-NEXT: csrr a2, vlenb
+; CHECK-NEXT: srli a3, a2, 2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB31_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a2, 0
+; CHECK-NEXT: li a1, 0
; CHECK-NEXT: j .LBB31_5
; CHECK-NEXT: .LBB31_2: # %vector.ph
-; CHECK-NEXT: addi a2, a3, -1
-; CHECK-NEXT: andi a4, a2, 1024
-; CHECK-NEXT: xori a2, a4, 1024
+; CHECK-NEXT: addi a1, a3, -1
+; CHECK-NEXT: andi a4, a1, 1024
+; CHECK-NEXT: xori a1, a4, 1024
; CHECK-NEXT: mv a5, a0
-; CHECK-NEXT: mv a6, a2
+; CHECK-NEXT: mv a6, a1
; CHECK-NEXT: vsetvli a7, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB31_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
@@ -1927,12 +1927,12 @@ define void @sink_splat_frsub_scalable(ptr nocapture %a, float %x) {
; CHECK-NEXT: sub a6, a6, a3
; CHECK-NEXT: vfrsub.vf v8, v8, fa0
; CHECK-NEXT: vs1r.v v8, (a5)
-; CHECK-NEXT: add a5, a5, a1
+; CHECK-NEXT: add a5, a5, a2
; CHECK-NEXT: bnez a6, .LBB31_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a4, .LBB31_7
; CHECK-NEXT: .LBB31_5: # %for.body.preheader
-; CHECK-NEXT: slli a1, a2, 2
+; CHECK-NEXT: slli a1, a1, 2
; CHECK-NEXT: lui a2, 1
; CHECK-NEXT: add a1, a0, a1
; CHECK-NEXT: add a0, a0, a2
@@ -2073,35 +2073,35 @@ for.cond.cleanup: ; preds = %vector.body
define void @sink_splat_fma_scalable(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, float %x) {
; CHECK-LABEL: sink_splat_fma_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a4, a2, 2
-; CHECK-NEXT: li a3, 1024
-; CHECK-NEXT: bgeu a3, a4, .LBB34_2
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: srli a4, a3, 2
+; CHECK-NEXT: li a5, 1024
+; CHECK-NEXT: bgeu a5, a4, .LBB34_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB34_5
; CHECK-NEXT: .LBB34_2: # %vector.ph
-; CHECK-NEXT: addi a3, a4, -1
-; CHECK-NEXT: andi a5, a3, 1024
-; CHECK-NEXT: xori a3, a5, 1024
+; CHECK-NEXT: addi a2, a4, -1
+; CHECK-NEXT: andi a5, a2, 1024
+; CHECK-NEXT: xori a2, a5, 1024
; CHECK-NEXT: mv a6, a0
; CHECK-NEXT: mv a7, a1
-; CHECK-NEXT: mv t0, a3
+; CHECK-NEXT: mv t0, a2
; CHECK-NEXT: vsetvli t1, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB34_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vl1re32.v v8, (a6)
; CHECK-NEXT: vl1re32.v v9, (a7)
; CHECK-NEXT: sub t0, t0, a4
-; CHECK-NEXT: add a7, a7, a2
+; CHECK-NEXT: add a7, a7, a3
; CHECK-NEXT: vfmacc.vf v9, fa0, v8
; CHECK-NEXT: vs1r.v v9, (a6)
-; CHECK-NEXT: add a6, a6, a2
+; CHECK-NEXT: add a6, a6, a3
; CHECK-NEXT: bnez t0, .LBB34_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a5, .LBB34_7
; CHECK-NEXT: .LBB34_5: # %for.body.preheader
-; CHECK-NEXT: slli a2, a3, 2
+; CHECK-NEXT: slli a2, a2, 2
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: add a2, a1, a2
@@ -2173,35 +2173,35 @@ for.body: ; preds = %for.body.preheader,
define void @sink_splat_fma_commute_scalable(ptr noalias nocapture %a, ptr noalias nocapture readonly %b, float %x) {
; CHECK-LABEL: sink_splat_fma_commute_scalable:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: csrr a2, vlenb
-; CHECK-NEXT: srli a4, a2, 2
-; CHECK-NEXT: li a3, 1024
-; CHECK-NEXT: bgeu a3, a4, .LBB35_2
+; CHECK-NEXT: csrr a3, vlenb
+; CHECK-NEXT: srli a4, a3, 2
+; CHECK-NEXT: li a5, 1024
+; CHECK-NEXT: bgeu a5, a4, .LBB35_2
; CHECK-NEXT: # %bb.1:
-; CHECK-NEXT: li a3, 0
+; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB35_5
; CHECK-NEXT: .LBB35_2: # %vector.ph
-; CHECK-NEXT: addi a3, a4, -1
-; CHECK-NEXT: andi a5, a3, 1024
-; CHECK-NEXT: xori a3, a5, 1024
+; CHECK-NEXT: addi a2, a4, -1
+; CHECK-NEXT: andi a5, a2, 1024
+; CHECK-NEXT: xori a2, a5, 1024
; CHECK-NEXT: mv a6, a0
; CHECK-NEXT: mv a7, a1
-; CHECK-NEXT: mv t0, a3
+; CHECK-NEXT: mv t0, a2
; CHECK-NEXT: vsetvli t1, zero, e32, m1, ta, ma
; CHECK-NEXT: .LBB35_3: # %vector.body
; CHECK-NEXT: # =>This Inner Loop Header: Depth=1
; CHECK-NEXT: vl1re32.v v8, (a6)
; CHECK-NEXT: vl1re32.v v9, (a7)
; CHECK-NEXT: sub t0, t0, a4
-; CHECK-NEXT: add a7, a7, a2
+; CHECK-NEXT: add a7, a7, a3
; CHECK-NEXT: vfmacc.vf v9, fa0, v8
; CHECK-NEXT: vs1r.v v9, (a6)
-; CHECK-NEXT: add a6, a6, a2
+; CHECK-NEXT: add a6, a6, a3
; CHECK-NEXT: bnez t0, .LBB35_3
; CHECK-NEXT: # %bb.4: # %middle.block
; CHECK-NEXT: beqz a5, .LBB35_7
; CHECK-NEXT: .LBB35_5: # %for.body.preheader
-; CHECK-NEXT: slli a2, a3, 2
+; CHECK-NEXT: slli a2, a2, 2
; CHECK-NEXT: lui a3, 1
; CHECK-NEXT: add a0, a0, a2
; CHECK-NEXT: add a2, a1, a2
@@ -2488,8 +2488,8 @@ define void @sink_splat_udiv_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB42_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB42_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB42_5
@@ -2579,8 +2579,8 @@ define void @sink_splat_sdiv_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB43_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB43_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB43_5
@@ -2670,8 +2670,8 @@ define void @sink_splat_urem_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB44_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB44_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB44_5
@@ -2761,8 +2761,8 @@ define void @sink_splat_srem_scalable(ptr nocapture %a, i32 signext %x) {
; CHECK: # %bb.0: # %entry
; CHECK-NEXT: csrr a5, vlenb
; CHECK-NEXT: srli a3, a5, 1
-; CHECK-NEXT: li a2, 1024
-; CHECK-NEXT: bgeu a2, a3, .LBB45_2
+; CHECK-NEXT: li a4, 1024
+; CHECK-NEXT: bgeu a4, a3, .LBB45_2
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a2, 0
; CHECK-NEXT: j .LBB45_5
diff --git a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
index f295bd8..cb101f9 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vandn-sdnode.ll
@@ -2240,19 +2240,19 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-RV32: # %bb.0: # %entry
; CHECK-RV32-NEXT: csrr a4, vlenb
-; CHECK-RV32-NEXT: srli a2, a4, 3
-; CHECK-RV32-NEXT: li a3, 64
+; CHECK-RV32-NEXT: srli a3, a4, 3
+; CHECK-RV32-NEXT: li a5, 64
; CHECK-RV32-NEXT: not a1, a1
-; CHECK-RV32-NEXT: bgeu a3, a2, .LBB98_2
+; CHECK-RV32-NEXT: bgeu a5, a3, .LBB98_2
; CHECK-RV32-NEXT: # %bb.1:
-; CHECK-RV32-NEXT: li a3, 0
; CHECK-RV32-NEXT: li a2, 0
+; CHECK-RV32-NEXT: li a3, 0
; CHECK-RV32-NEXT: j .LBB98_5
; CHECK-RV32-NEXT: .LBB98_2: # %vector.ph
-; CHECK-RV32-NEXT: li a2, 0
+; CHECK-RV32-NEXT: li a3, 0
; CHECK-RV32-NEXT: srli a4, a4, 1
-; CHECK-RV32-NEXT: neg a3, a4
-; CHECK-RV32-NEXT: andi a3, a3, 256
+; CHECK-RV32-NEXT: neg a2, a4
+; CHECK-RV32-NEXT: andi a2, a2, 256
; CHECK-RV32-NEXT: li a6, 0
; CHECK-RV32-NEXT: li a5, 0
; CHECK-RV32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
@@ -2264,25 +2264,25 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV32-NEXT: vl2re32.v v8, (a7)
; CHECK-RV32-NEXT: sltu a6, t0, a6
; CHECK-RV32-NEXT: add a5, a5, a6
-; CHECK-RV32-NEXT: xor a6, t0, a3
+; CHECK-RV32-NEXT: xor a6, t0, a2
; CHECK-RV32-NEXT: vand.vx v8, v8, a1
; CHECK-RV32-NEXT: or t1, a6, a5
; CHECK-RV32-NEXT: vs2r.v v8, (a7)
; CHECK-RV32-NEXT: mv a6, t0
; CHECK-RV32-NEXT: bnez t1, .LBB98_3
; CHECK-RV32-NEXT: # %bb.4: # %middle.block
-; CHECK-RV32-NEXT: bnez a3, .LBB98_6
+; CHECK-RV32-NEXT: bnez a2, .LBB98_6
; CHECK-RV32-NEXT: .LBB98_5: # %for.body
; CHECK-RV32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-RV32-NEXT: slli a4, a3, 2
-; CHECK-RV32-NEXT: addi a3, a3, 1
+; CHECK-RV32-NEXT: slli a4, a2, 2
+; CHECK-RV32-NEXT: addi a2, a2, 1
; CHECK-RV32-NEXT: add a4, a0, a4
; CHECK-RV32-NEXT: lw a5, 0(a4)
-; CHECK-RV32-NEXT: seqz a6, a3
-; CHECK-RV32-NEXT: add a2, a2, a6
-; CHECK-RV32-NEXT: xori a6, a3, 256
+; CHECK-RV32-NEXT: seqz a6, a2
+; CHECK-RV32-NEXT: add a3, a3, a6
+; CHECK-RV32-NEXT: xori a6, a2, 256
; CHECK-RV32-NEXT: and a5, a5, a1
-; CHECK-RV32-NEXT: or a6, a6, a2
+; CHECK-RV32-NEXT: or a6, a6, a3
; CHECK-RV32-NEXT: sw a5, 0(a4)
; CHECK-RV32-NEXT: bnez a6, .LBB98_5
; CHECK-RV32-NEXT: .LBB98_6: # %for.cond.cleanup
@@ -2291,10 +2291,10 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-RV64-LABEL: vand_vx_loop_hoisted_not:
; CHECK-RV64: # %bb.0: # %entry
; CHECK-RV64-NEXT: csrr a4, vlenb
-; CHECK-RV64-NEXT: srli a2, a4, 3
-; CHECK-RV64-NEXT: li a3, 64
+; CHECK-RV64-NEXT: srli a3, a4, 3
+; CHECK-RV64-NEXT: li a5, 64
; CHECK-RV64-NEXT: not a1, a1
-; CHECK-RV64-NEXT: bgeu a3, a2, .LBB98_2
+; CHECK-RV64-NEXT: bgeu a5, a3, .LBB98_2
; CHECK-RV64-NEXT: # %bb.1:
; CHECK-RV64-NEXT: li a2, 0
; CHECK-RV64-NEXT: j .LBB98_5
@@ -2333,18 +2333,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-NOZBB32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-NOZBB32: # %bb.0: # %entry
; CHECK-ZVKB-NOZBB32-NEXT: csrr a4, vlenb
-; CHECK-ZVKB-NOZBB32-NEXT: srli a2, a4, 3
-; CHECK-ZVKB-NOZBB32-NEXT: li a3, 64
-; CHECK-ZVKB-NOZBB32-NEXT: bgeu a3, a2, .LBB98_2
+; CHECK-ZVKB-NOZBB32-NEXT: srli a3, a4, 3
+; CHECK-ZVKB-NOZBB32-NEXT: li a5, 64
+; CHECK-ZVKB-NOZBB32-NEXT: bgeu a5, a3, .LBB98_2
; CHECK-ZVKB-NOZBB32-NEXT: # %bb.1:
-; CHECK-ZVKB-NOZBB32-NEXT: li a3, 0
; CHECK-ZVKB-NOZBB32-NEXT: li a2, 0
+; CHECK-ZVKB-NOZBB32-NEXT: li a3, 0
; CHECK-ZVKB-NOZBB32-NEXT: j .LBB98_5
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_2: # %vector.ph
-; CHECK-ZVKB-NOZBB32-NEXT: li a2, 0
+; CHECK-ZVKB-NOZBB32-NEXT: li a3, 0
; CHECK-ZVKB-NOZBB32-NEXT: srli a4, a4, 1
-; CHECK-ZVKB-NOZBB32-NEXT: neg a3, a4
-; CHECK-ZVKB-NOZBB32-NEXT: andi a3, a3, 256
+; CHECK-ZVKB-NOZBB32-NEXT: neg a2, a4
+; CHECK-ZVKB-NOZBB32-NEXT: andi a2, a2, 256
; CHECK-ZVKB-NOZBB32-NEXT: li a6, 0
; CHECK-ZVKB-NOZBB32-NEXT: li a5, 0
; CHECK-ZVKB-NOZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
@@ -2356,27 +2356,27 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-NOZBB32-NEXT: vl2re32.v v8, (a7)
; CHECK-ZVKB-NOZBB32-NEXT: sltu a6, t0, a6
; CHECK-ZVKB-NOZBB32-NEXT: add a5, a5, a6
-; CHECK-ZVKB-NOZBB32-NEXT: xor a6, t0, a3
+; CHECK-ZVKB-NOZBB32-NEXT: xor a6, t0, a2
; CHECK-ZVKB-NOZBB32-NEXT: vandn.vx v8, v8, a1
; CHECK-ZVKB-NOZBB32-NEXT: or t1, a6, a5
; CHECK-ZVKB-NOZBB32-NEXT: vs2r.v v8, (a7)
; CHECK-ZVKB-NOZBB32-NEXT: mv a6, t0
; CHECK-ZVKB-NOZBB32-NEXT: bnez t1, .LBB98_3
; CHECK-ZVKB-NOZBB32-NEXT: # %bb.4: # %middle.block
-; CHECK-ZVKB-NOZBB32-NEXT: bnez a3, .LBB98_7
+; CHECK-ZVKB-NOZBB32-NEXT: bnez a2, .LBB98_7
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_5: # %for.body.preheader
; CHECK-ZVKB-NOZBB32-NEXT: not a1, a1
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_6: # %for.body
; CHECK-ZVKB-NOZBB32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-NOZBB32-NEXT: slli a4, a3, 2
-; CHECK-ZVKB-NOZBB32-NEXT: addi a3, a3, 1
+; CHECK-ZVKB-NOZBB32-NEXT: slli a4, a2, 2
+; CHECK-ZVKB-NOZBB32-NEXT: addi a2, a2, 1
; CHECK-ZVKB-NOZBB32-NEXT: add a4, a0, a4
; CHECK-ZVKB-NOZBB32-NEXT: lw a5, 0(a4)
-; CHECK-ZVKB-NOZBB32-NEXT: seqz a6, a3
-; CHECK-ZVKB-NOZBB32-NEXT: add a2, a2, a6
-; CHECK-ZVKB-NOZBB32-NEXT: xori a6, a3, 256
+; CHECK-ZVKB-NOZBB32-NEXT: seqz a6, a2
+; CHECK-ZVKB-NOZBB32-NEXT: add a3, a3, a6
+; CHECK-ZVKB-NOZBB32-NEXT: xori a6, a2, 256
; CHECK-ZVKB-NOZBB32-NEXT: and a5, a5, a1
-; CHECK-ZVKB-NOZBB32-NEXT: or a6, a6, a2
+; CHECK-ZVKB-NOZBB32-NEXT: or a6, a6, a3
; CHECK-ZVKB-NOZBB32-NEXT: sw a5, 0(a4)
; CHECK-ZVKB-NOZBB32-NEXT: bnez a6, .LBB98_6
; CHECK-ZVKB-NOZBB32-NEXT: .LBB98_7: # %for.cond.cleanup
@@ -2385,9 +2385,9 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-NOZBB64-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-NOZBB64: # %bb.0: # %entry
; CHECK-ZVKB-NOZBB64-NEXT: csrr a4, vlenb
-; CHECK-ZVKB-NOZBB64-NEXT: srli a2, a4, 3
-; CHECK-ZVKB-NOZBB64-NEXT: li a3, 64
-; CHECK-ZVKB-NOZBB64-NEXT: bgeu a3, a2, .LBB98_2
+; CHECK-ZVKB-NOZBB64-NEXT: srli a3, a4, 3
+; CHECK-ZVKB-NOZBB64-NEXT: li a5, 64
+; CHECK-ZVKB-NOZBB64-NEXT: bgeu a5, a3, .LBB98_2
; CHECK-ZVKB-NOZBB64-NEXT: # %bb.1:
; CHECK-ZVKB-NOZBB64-NEXT: li a2, 0
; CHECK-ZVKB-NOZBB64-NEXT: j .LBB98_5
@@ -2427,18 +2427,18 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-ZBB32-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-ZBB32: # %bb.0: # %entry
; CHECK-ZVKB-ZBB32-NEXT: csrr a4, vlenb
-; CHECK-ZVKB-ZBB32-NEXT: srli a2, a4, 3
-; CHECK-ZVKB-ZBB32-NEXT: li a3, 64
-; CHECK-ZVKB-ZBB32-NEXT: bgeu a3, a2, .LBB98_2
+; CHECK-ZVKB-ZBB32-NEXT: srli a3, a4, 3
+; CHECK-ZVKB-ZBB32-NEXT: li a5, 64
+; CHECK-ZVKB-ZBB32-NEXT: bgeu a5, a3, .LBB98_2
; CHECK-ZVKB-ZBB32-NEXT: # %bb.1:
-; CHECK-ZVKB-ZBB32-NEXT: li a3, 0
; CHECK-ZVKB-ZBB32-NEXT: li a2, 0
+; CHECK-ZVKB-ZBB32-NEXT: li a3, 0
; CHECK-ZVKB-ZBB32-NEXT: j .LBB98_5
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_2: # %vector.ph
-; CHECK-ZVKB-ZBB32-NEXT: li a2, 0
+; CHECK-ZVKB-ZBB32-NEXT: li a3, 0
; CHECK-ZVKB-ZBB32-NEXT: srli a4, a4, 1
-; CHECK-ZVKB-ZBB32-NEXT: neg a3, a4
-; CHECK-ZVKB-ZBB32-NEXT: andi a3, a3, 256
+; CHECK-ZVKB-ZBB32-NEXT: neg a2, a4
+; CHECK-ZVKB-ZBB32-NEXT: andi a2, a2, 256
; CHECK-ZVKB-ZBB32-NEXT: li a6, 0
; CHECK-ZVKB-ZBB32-NEXT: li a5, 0
; CHECK-ZVKB-ZBB32-NEXT: vsetvli a7, zero, e32, m2, ta, ma
@@ -2450,25 +2450,25 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-ZBB32-NEXT: vl2re32.v v8, (a7)
; CHECK-ZVKB-ZBB32-NEXT: sltu a6, t0, a6
; CHECK-ZVKB-ZBB32-NEXT: add a5, a5, a6
-; CHECK-ZVKB-ZBB32-NEXT: xor a6, t0, a3
+; CHECK-ZVKB-ZBB32-NEXT: xor a6, t0, a2
; CHECK-ZVKB-ZBB32-NEXT: vandn.vx v8, v8, a1
; CHECK-ZVKB-ZBB32-NEXT: or t1, a6, a5
; CHECK-ZVKB-ZBB32-NEXT: vs2r.v v8, (a7)
; CHECK-ZVKB-ZBB32-NEXT: mv a6, t0
; CHECK-ZVKB-ZBB32-NEXT: bnez t1, .LBB98_3
; CHECK-ZVKB-ZBB32-NEXT: # %bb.4: # %middle.block
-; CHECK-ZVKB-ZBB32-NEXT: bnez a3, .LBB98_6
+; CHECK-ZVKB-ZBB32-NEXT: bnez a2, .LBB98_6
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_5: # %for.body
; CHECK-ZVKB-ZBB32-NEXT: # =>This Inner Loop Header: Depth=1
-; CHECK-ZVKB-ZBB32-NEXT: slli a4, a3, 2
-; CHECK-ZVKB-ZBB32-NEXT: addi a3, a3, 1
+; CHECK-ZVKB-ZBB32-NEXT: slli a4, a2, 2
+; CHECK-ZVKB-ZBB32-NEXT: addi a2, a2, 1
; CHECK-ZVKB-ZBB32-NEXT: add a4, a0, a4
; CHECK-ZVKB-ZBB32-NEXT: lw a5, 0(a4)
-; CHECK-ZVKB-ZBB32-NEXT: seqz a6, a3
-; CHECK-ZVKB-ZBB32-NEXT: add a2, a2, a6
-; CHECK-ZVKB-ZBB32-NEXT: xori a6, a3, 256
+; CHECK-ZVKB-ZBB32-NEXT: seqz a6, a2
+; CHECK-ZVKB-ZBB32-NEXT: add a3, a3, a6
+; CHECK-ZVKB-ZBB32-NEXT: xori a6, a2, 256
; CHECK-ZVKB-ZBB32-NEXT: andn a5, a5, a1
-; CHECK-ZVKB-ZBB32-NEXT: or a6, a6, a2
+; CHECK-ZVKB-ZBB32-NEXT: or a6, a6, a3
; CHECK-ZVKB-ZBB32-NEXT: sw a5, 0(a4)
; CHECK-ZVKB-ZBB32-NEXT: bnez a6, .LBB98_5
; CHECK-ZVKB-ZBB32-NEXT: .LBB98_6: # %for.cond.cleanup
@@ -2477,9 +2477,9 @@ define void @vand_vx_loop_hoisted_not(ptr %a, i32 noundef signext %mask) {
; CHECK-ZVKB-ZBB64-LABEL: vand_vx_loop_hoisted_not:
; CHECK-ZVKB-ZBB64: # %bb.0: # %entry
; CHECK-ZVKB-ZBB64-NEXT: csrr a4, vlenb
-; CHECK-ZVKB-ZBB64-NEXT: srli a2, a4, 3
-; CHECK-ZVKB-ZBB64-NEXT: li a3, 64
-; CHECK-ZVKB-ZBB64-NEXT: bgeu a3, a2, .LBB98_2
+; CHECK-ZVKB-ZBB64-NEXT: srli a3, a4, 3
+; CHECK-ZVKB-ZBB64-NEXT: li a5, 64
+; CHECK-ZVKB-ZBB64-NEXT: bgeu a5, a3, .LBB98_2
; CHECK-ZVKB-ZBB64-NEXT: # %bb.1:
; CHECK-ZVKB-ZBB64-NEXT: li a2, 0
; CHECK-ZVKB-ZBB64-NEXT: j .LBB98_5
diff --git a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
index 7990dfc..9a0f27b 100644
--- a/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/vxrm-insert-out-of-loop.ll
@@ -86,14 +86,14 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
; RV32-NEXT: # Child Loop BB0_15 Depth 2
; RV32-NEXT: beqz t1, .LBB0_12
; RV32-NEXT: # %bb.11: # in Loop: Header=BB0_10 Depth=1
-; RV32-NEXT: li t4, 0
; RV32-NEXT: li t3, 0
+; RV32-NEXT: li t4, 0
; RV32-NEXT: j .LBB0_15
; RV32-NEXT: .LBB0_12: # %vector.ph
; RV32-NEXT: # in Loop: Header=BB0_10 Depth=1
-; RV32-NEXT: li t3, 0
-; RV32-NEXT: neg t4, t2
-; RV32-NEXT: and t4, t4, a6
+; RV32-NEXT: li t4, 0
+; RV32-NEXT: neg t3, t2
+; RV32-NEXT: and t3, t3, a6
; RV32-NEXT: li t6, 0
; RV32-NEXT: li t5, 0
; RV32-NEXT: vsetvli s0, zero, e8, m2, ta, ma
@@ -108,7 +108,7 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
; RV32-NEXT: add s1, t6, t2
; RV32-NEXT: sltu t6, s1, t6
; RV32-NEXT: add t5, t5, t6
-; RV32-NEXT: xor t6, s1, t4
+; RV32-NEXT: xor t6, s1, t3
; RV32-NEXT: vaaddu.vv v8, v8, v10
; RV32-NEXT: or s2, t6, t5
; RV32-NEXT: vs2r.v v8, (s0)
@@ -116,23 +116,23 @@ define void @test1(ptr nocapture noundef writeonly %dst, i32 noundef signext %i_
; RV32-NEXT: bnez s2, .LBB0_13
; RV32-NEXT: # %bb.14: # %middle.block
; RV32-NEXT: # in Loop: Header=BB0_10 Depth=1
-; RV32-NEXT: beq t4, a6, .LBB0_9
+; RV32-NEXT: beq t3, a6, .LBB0_9
; RV32-NEXT: .LBB0_15: # %for.body4.us
; RV32-NEXT: # Parent Loop BB0_10 Depth=1
; RV32-NEXT: # => This Inner Loop Header: Depth=2
-; RV32-NEXT: add t5, a2, t4
-; RV32-NEXT: add t6, a4, t4
-; RV32-NEXT: add s0, a0, t4
+; RV32-NEXT: add t5, a2, t3
+; RV32-NEXT: add t6, a4, t3
+; RV32-NEXT: add s0, a0, t3
; RV32-NEXT: lbu t5, 0(t5)
; RV32-NEXT: lbu t6, 0(t6)
-; RV32-NEXT: addi t4, t4, 1
-; RV32-NEXT: seqz s1, t4
-; RV32-NEXT: add t3, t3, s1
+; RV32-NEXT: addi t3, t3, 1
+; RV32-NEXT: seqz s1, t3
+; RV32-NEXT: add t4, t4, s1
; RV32-NEXT: add t5, t5, t6
-; RV32-NEXT: xor t6, t4, a6
+; RV32-NEXT: xor t6, t3, a6
; RV32-NEXT: addi t5, t5, 1
; RV32-NEXT: srli t5, t5, 1
-; RV32-NEXT: or t6, t6, t3
+; RV32-NEXT: or t6, t6, t4
; RV32-NEXT: sb t5, 0(s0)
; RV32-NEXT: bnez t6, .LBB0_15
; RV32-NEXT: j .LBB0_9
diff --git a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
index 4066f62..141b814 100644
--- a/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
+++ b/llvm/test/CodeGen/RISCV/select-optimize-multiple.ll
@@ -42,33 +42,35 @@ define i128 @cmovcc128(i64 signext %a, i128 %b, i128 %c) nounwind {
; RV32I-NEXT: xori a1, a1, 123
; RV32I-NEXT: or a1, a1, a2
; RV32I-NEXT: mv a2, a3
-; RV32I-NEXT: beqz a1, .LBB1_2
+; RV32I-NEXT: bnez a1, .LBB1_6
; RV32I-NEXT: # %bb.1: # %entry
-; RV32I-NEXT: mv a2, a4
+; RV32I-NEXT: addi a5, a3, 4
+; RV32I-NEXT: bnez a1, .LBB1_7
; RV32I-NEXT: .LBB1_2: # %entry
-; RV32I-NEXT: beqz a1, .LBB1_5
-; RV32I-NEXT: # %bb.3: # %entry
-; RV32I-NEXT: addi a5, a4, 4
-; RV32I-NEXT: bnez a1, .LBB1_6
-; RV32I-NEXT: .LBB1_4:
; RV32I-NEXT: addi a6, a3, 8
-; RV32I-NEXT: j .LBB1_7
-; RV32I-NEXT: .LBB1_5:
-; RV32I-NEXT: addi a5, a3, 4
; RV32I-NEXT: beqz a1, .LBB1_4
-; RV32I-NEXT: .LBB1_6: # %entry
+; RV32I-NEXT: .LBB1_3: # %entry
; RV32I-NEXT: addi a6, a4, 8
-; RV32I-NEXT: .LBB1_7: # %entry
+; RV32I-NEXT: .LBB1_4: # %entry
; RV32I-NEXT: lw a2, 0(a2)
; RV32I-NEXT: lw a5, 0(a5)
; RV32I-NEXT: lw a6, 0(a6)
-; RV32I-NEXT: beqz a1, .LBB1_9
-; RV32I-NEXT: # %bb.8: # %entry
+; RV32I-NEXT: beqz a1, .LBB1_8
+; RV32I-NEXT: # %bb.5: # %entry
; RV32I-NEXT: addi a3, a4, 12
-; RV32I-NEXT: j .LBB1_10
-; RV32I-NEXT: .LBB1_9:
+; RV32I-NEXT: j .LBB1_9
+; RV32I-NEXT: .LBB1_6: # %entry
+; RV32I-NEXT: mv a2, a4
+; RV32I-NEXT: addi a5, a3, 4
+; RV32I-NEXT: beqz a1, .LBB1_2
+; RV32I-NEXT: .LBB1_7: # %entry
+; RV32I-NEXT: addi a5, a4, 4
+; RV32I-NEXT: addi a6, a3, 8
+; RV32I-NEXT: bnez a1, .LBB1_3
+; RV32I-NEXT: j .LBB1_4
+; RV32I-NEXT: .LBB1_8:
; RV32I-NEXT: addi a3, a3, 12
-; RV32I-NEXT: .LBB1_10: # %entry
+; RV32I-NEXT: .LBB1_9: # %entry
; RV32I-NEXT: lw a1, 0(a3)
; RV32I-NEXT: sw a2, 0(a0)
; RV32I-NEXT: sw a5, 4(a0)
@@ -125,33 +127,35 @@ define i128 @cmov128(i1 %a, i128 %b, i128 %c) nounwind {
; RV32I: # %bb.0: # %entry
; RV32I-NEXT: andi a1, a1, 1
; RV32I-NEXT: mv a4, a2
-; RV32I-NEXT: bnez a1, .LBB3_2
+; RV32I-NEXT: beqz a1, .LBB3_6
; RV32I-NEXT: # %bb.1: # %entry
-; RV32I-NEXT: mv a4, a3
+; RV32I-NEXT: addi a5, a2, 4
+; RV32I-NEXT: beqz a1, .LBB3_7
; RV32I-NEXT: .LBB3_2: # %entry
-; RV32I-NEXT: bnez a1, .LBB3_5
-; RV32I-NEXT: # %bb.3: # %entry
-; RV32I-NEXT: addi a5, a3, 4
-; RV32I-NEXT: beqz a1, .LBB3_6
-; RV32I-NEXT: .LBB3_4:
; RV32I-NEXT: addi a6, a2, 8
-; RV32I-NEXT: j .LBB3_7
-; RV32I-NEXT: .LBB3_5:
-; RV32I-NEXT: addi a5, a2, 4
; RV32I-NEXT: bnez a1, .LBB3_4
-; RV32I-NEXT: .LBB3_6: # %entry
+; RV32I-NEXT: .LBB3_3: # %entry
; RV32I-NEXT: addi a6, a3, 8
-; RV32I-NEXT: .LBB3_7: # %entry
+; RV32I-NEXT: .LBB3_4: # %entry
; RV32I-NEXT: lw a4, 0(a4)
; RV32I-NEXT: lw a5, 0(a5)
; RV32I-NEXT: lw a6, 0(a6)
-; RV32I-NEXT: bnez a1, .LBB3_9
-; RV32I-NEXT: # %bb.8: # %entry
+; RV32I-NEXT: bnez a1, .LBB3_8
+; RV32I-NEXT: # %bb.5: # %entry
; RV32I-NEXT: addi a2, a3, 12
-; RV32I-NEXT: j .LBB3_10
-; RV32I-NEXT: .LBB3_9:
+; RV32I-NEXT: j .LBB3_9
+; RV32I-NEXT: .LBB3_6: # %entry
+; RV32I-NEXT: mv a4, a3
+; RV32I-NEXT: addi a5, a2, 4
+; RV32I-NEXT: bnez a1, .LBB3_2
+; RV32I-NEXT: .LBB3_7: # %entry
+; RV32I-NEXT: addi a5, a3, 4
+; RV32I-NEXT: addi a6, a2, 8
+; RV32I-NEXT: beqz a1, .LBB3_3
+; RV32I-NEXT: j .LBB3_4
+; RV32I-NEXT: .LBB3_8:
; RV32I-NEXT: addi a2, a2, 12
-; RV32I-NEXT: .LBB3_10: # %entry
+; RV32I-NEXT: .LBB3_9: # %entry
; RV32I-NEXT: lw a1, 0(a2)
; RV32I-NEXT: sw a4, 0(a0)
; RV32I-NEXT: sw a5, 4(a0)
diff --git a/llvm/test/CodeGen/RISCV/sextw-removal.ll b/llvm/test/CodeGen/RISCV/sextw-removal.ll
index b155fea..4066391 100644
--- a/llvm/test/CodeGen/RISCV/sextw-removal.ll
+++ b/llvm/test/CodeGen/RISCV/sextw-removal.ll
@@ -1032,19 +1032,17 @@ bb7: ; preds = %bb2
define signext i32 @bug(i32 signext %x) {
; CHECK-LABEL: bug:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: beqz a0, .LBB18_5
+; CHECK-NEXT: beqz a0, .LBB18_4
; CHECK-NEXT: # %bb.1: # %if.end
-; CHECK-NEXT: srliw a1, a0, 16
-; CHECK-NEXT: seqz a2, a1
-; CHECK-NEXT: slli a2, a2, 4
-; CHECK-NEXT: sllw a0, a0, a2
-; CHECK-NEXT: beqz a1, .LBB18_3
+; CHECK-NEXT: srliw a2, a0, 16
+; CHECK-NEXT: seqz a1, a2
+; CHECK-NEXT: slli a1, a1, 4
+; CHECK-NEXT: sllw a0, a0, a1
+; CHECK-NEXT: li a1, 16
+; CHECK-NEXT: beqz a2, .LBB18_3
; CHECK-NEXT: # %bb.2: # %if.end
; CHECK-NEXT: li a1, 32
-; CHECK-NEXT: j .LBB18_4
-; CHECK-NEXT: .LBB18_3:
-; CHECK-NEXT: li a1, 16
-; CHECK-NEXT: .LBB18_4: # %if.end
+; CHECK-NEXT: .LBB18_3: # %if.end
; CHECK-NEXT: srliw a2, a0, 24
; CHECK-NEXT: seqz a2, a2
; CHECK-NEXT: slli a3, a2, 3
@@ -1069,24 +1067,22 @@ define signext i32 @bug(i32 signext %x) {
; CHECK-NEXT: not a0, a0
; CHECK-NEXT: srli a0, a0, 31
; CHECK-NEXT: addw a0, a1, a0
-; CHECK-NEXT: .LBB18_5: # %cleanup
+; CHECK-NEXT: .LBB18_4: # %cleanup
; CHECK-NEXT: ret
;
; NOREMOVAL-LABEL: bug:
; NOREMOVAL: # %bb.0: # %entry
-; NOREMOVAL-NEXT: beqz a0, .LBB18_5
+; NOREMOVAL-NEXT: beqz a0, .LBB18_4
; NOREMOVAL-NEXT: # %bb.1: # %if.end
-; NOREMOVAL-NEXT: srliw a1, a0, 16
-; NOREMOVAL-NEXT: seqz a2, a1
-; NOREMOVAL-NEXT: slli a2, a2, 4
-; NOREMOVAL-NEXT: sllw a0, a0, a2
-; NOREMOVAL-NEXT: beqz a1, .LBB18_3
+; NOREMOVAL-NEXT: srliw a2, a0, 16
+; NOREMOVAL-NEXT: seqz a1, a2
+; NOREMOVAL-NEXT: slli a1, a1, 4
+; NOREMOVAL-NEXT: sllw a0, a0, a1
+; NOREMOVAL-NEXT: li a1, 16
+; NOREMOVAL-NEXT: beqz a2, .LBB18_3
; NOREMOVAL-NEXT: # %bb.2: # %if.end
; NOREMOVAL-NEXT: li a1, 32
-; NOREMOVAL-NEXT: j .LBB18_4
-; NOREMOVAL-NEXT: .LBB18_3:
-; NOREMOVAL-NEXT: li a1, 16
-; NOREMOVAL-NEXT: .LBB18_4: # %if.end
+; NOREMOVAL-NEXT: .LBB18_3: # %if.end
; NOREMOVAL-NEXT: srliw a2, a0, 24
; NOREMOVAL-NEXT: seqz a2, a2
; NOREMOVAL-NEXT: slli a3, a2, 3
@@ -1111,7 +1107,7 @@ define signext i32 @bug(i32 signext %x) {
; NOREMOVAL-NEXT: not a0, a0
; NOREMOVAL-NEXT: srli a0, a0, 31
; NOREMOVAL-NEXT: addw a0, a1, a0
-; NOREMOVAL-NEXT: .LBB18_5: # %cleanup
+; NOREMOVAL-NEXT: .LBB18_4: # %cleanup
; NOREMOVAL-NEXT: ret
entry:
%tobool.not = icmp eq i32 %x, 0
diff --git a/llvm/test/CodeGen/RISCV/simplify-condbr.ll b/llvm/test/CodeGen/RISCV/simplify-condbr.ll
index 6dabd7d..4e3940f 100644
--- a/llvm/test/CodeGen/RISCV/simplify-condbr.ll
+++ b/llvm/test/CodeGen/RISCV/simplify-condbr.ll
@@ -112,21 +112,21 @@ if.end1497: ; preds = %if.else1492, %sw.ep
define ptr @Perl_pp_refassign(ptr %PL_stack_sp, i1 %tobool.not, i1 %tobool3.not, i1 %cond1) nounwind {
; CHECK-LABEL: Perl_pp_refassign:
; CHECK: # %bb.0: # %entry
-; CHECK-NEXT: andi a1, a1, 1
-; CHECK-NEXT: beqz a1, .LBB1_3
+; CHECK-NEXT: andi a3, a1, 1
+; CHECK-NEXT: beqz a3, .LBB1_3
; CHECK-NEXT: # %bb.1:
; CHECK-NEXT: li a1, 0
-; CHECK-NEXT: andi a2, a2, 1
-; CHECK-NEXT: bnez a2, .LBB1_4
+; CHECK-NEXT: andi a3, a2, 1
+; CHECK-NEXT: bnez a3, .LBB1_4
; CHECK-NEXT: .LBB1_2: # %cond.true4
; CHECK-NEXT: ld a0, 0(a0)
-; CHECK-NEXT: snez a0, a0
-; CHECK-NEXT: bnez a0, .LBB1_5
+; CHECK-NEXT: snez a2, a0
+; CHECK-NEXT: bnez a2, .LBB1_5
; CHECK-NEXT: j .LBB1_6
; CHECK-NEXT: .LBB1_3: # %cond.true
; CHECK-NEXT: ld a1, 0(a0)
-; CHECK-NEXT: andi a2, a2, 1
-; CHECK-NEXT: beqz a2, .LBB1_2
+; CHECK-NEXT: andi a3, a2, 1
+; CHECK-NEXT: beqz a3, .LBB1_2
; CHECK-NEXT: .LBB1_4:
; CHECK-NEXT: j .LBB1_6
; CHECK-NEXT: .LBB1_5: # %sw.bb85
diff --git a/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll b/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
index c95fc00..eca4eaf 100644
--- a/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
+++ b/llvm/test/CodeGen/X86/2008-04-28-CoalescerBug.ll
@@ -15,7 +15,7 @@ define void @t(ptr %depth, ptr %bop, i32 %mode) nounwind {
; CHECK-NEXT: je LBB0_3
; CHECK-NEXT: ## %bb.1: ## %entry
; CHECK-NEXT: cmpl $1, %edx
-; CHECK-NEXT: jne LBB0_10
+; CHECK-NEXT: jne LBB0_9
; CHECK-NEXT: .p2align 4
; CHECK-NEXT: LBB0_2: ## %bb2898.us
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
@@ -26,15 +26,12 @@ define void @t(ptr %depth, ptr %bop, i32 %mode) nounwind {
; CHECK-NEXT: LBB0_4: ## %bb13088
; CHECK-NEXT: ## =>This Inner Loop Header: Depth=1
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne LBB0_5
-; CHECK-NEXT: ## %bb.6: ## %bb13101
+; CHECK-NEXT: movl $65535, %ecx ## imm = 0xFFFF
+; CHECK-NEXT: jne LBB0_6
+; CHECK-NEXT: ## %bb.5: ## %bb13101
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
; CHECK-NEXT: xorl %ecx, %ecx
-; CHECK-NEXT: jmp LBB0_7
-; CHECK-NEXT: .p2align 4
-; CHECK-NEXT: LBB0_5: ## in Loop: Header=BB0_4 Depth=1
-; CHECK-NEXT: movl $65535, %ecx ## imm = 0xFFFF
-; CHECK-NEXT: LBB0_7: ## %bb13107
+; CHECK-NEXT: LBB0_6: ## %bb13107
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
; CHECK-NEXT: movl %ecx, %edx
; CHECK-NEXT: shll $16, %edx
@@ -44,11 +41,11 @@ define void @t(ptr %depth, ptr %bop, i32 %mode) nounwind {
; CHECK-NEXT: subl %edx, %ecx
; CHECK-NEXT: testw %cx, %cx
; CHECK-NEXT: je LBB0_4
-; CHECK-NEXT: ## %bb.8: ## %bb13236
+; CHECK-NEXT: ## %bb.7: ## %bb13236
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
; CHECK-NEXT: testb %al, %al
; CHECK-NEXT: jne LBB0_4
-; CHECK-NEXT: ## %bb.9: ## %bb13572
+; CHECK-NEXT: ## %bb.8: ## %bb13572
; CHECK-NEXT: ## in Loop: Header=BB0_4 Depth=1
; CHECK-NEXT: movzwl %cx, %ecx
; CHECK-NEXT: movl %ecx, %edx
@@ -58,7 +55,7 @@ define void @t(ptr %depth, ptr %bop, i32 %mode) nounwind {
; CHECK-NEXT: shrl $16, %edx
; CHECK-NEXT: movw %dx, 0
; CHECK-NEXT: jmp LBB0_4
-; CHECK-NEXT: LBB0_10: ## %return
+; CHECK-NEXT: LBB0_9: ## %return
; CHECK-NEXT: retq
entry:
switch i32 %mode, label %return [
diff --git a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
index 1962dde..e5ff713 100644
--- a/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
+++ b/llvm/test/CodeGen/X86/2012-01-10-UndefExceptionEdge.ll
@@ -36,10 +36,10 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: xorl %edi, %edi
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: Ltmp0:
+; CHECK-NEXT: Ltmp0: ## EH_LABEL
; CHECK-NEXT: ## implicit-def: $ebx
; CHECK-NEXT: calll __Znam
-; CHECK-NEXT: Ltmp1:
+; CHECK-NEXT: Ltmp1: ## EH_LABEL
; CHECK-NEXT: ## %bb.1: ## %bb11
; CHECK-NEXT: movl %eax, %esi
; CHECK-NEXT: movb $1, %al
@@ -50,58 +50,58 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr
; CHECK-NEXT: ## kill: killed $eax
; CHECK-NEXT: LBB0_8: ## %bb38
; CHECK-NEXT: ## =>This Loop Header: Depth=1
-; CHECK-NEXT: ## Child Loop BB0_13 Depth 2
-; CHECK-NEXT: ## Child Loop BB0_16 Depth 3
-; CHECK-NEXT: ## Child Loop BB0_21 Depth 2
+; CHECK-NEXT: ## Child Loop BB0_12 Depth 2
+; CHECK-NEXT: ## Child Loop BB0_15 Depth 3
+; CHECK-NEXT: ## Child Loop BB0_20 Depth 2
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne LBB0_9
-; CHECK-NEXT: ## %bb.10: ## %bb41
+; CHECK-NEXT: jne LBB0_6
+; CHECK-NEXT: ## %bb.9: ## %bb41
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
-; CHECK-NEXT: Ltmp2:
+; CHECK-NEXT: Ltmp2: ## EH_LABEL
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
; CHECK-NEXT: movl %esi, (%esp)
; CHECK-NEXT: calll _Pjii
-; CHECK-NEXT: Ltmp3:
-; CHECK-NEXT: ## %bb.11: ## %bb42
+; CHECK-NEXT: Ltmp3: ## EH_LABEL
+; CHECK-NEXT: ## %bb.10: ## %bb42
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
; CHECK-NEXT: xorl %eax, %eax
; CHECK-NEXT: decl %eax
; CHECK-NEXT: testl %eax, %eax
-; CHECK-NEXT: jne LBB0_18
-; CHECK-NEXT: ## %bb.12: ## %bb45.preheader
+; CHECK-NEXT: jne LBB0_17
+; CHECK-NEXT: ## %bb.11: ## %bb45.preheader
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
; CHECK-NEXT: movl $255, %eax
-; CHECK-NEXT: LBB0_13: ## %bb45
+; CHECK-NEXT: LBB0_12: ## %bb45
; CHECK-NEXT: ## Parent Loop BB0_8 Depth=1
; CHECK-NEXT: ## => This Loop Header: Depth=2
-; CHECK-NEXT: ## Child Loop BB0_16 Depth 3
+; CHECK-NEXT: ## Child Loop BB0_15 Depth 3
; CHECK-NEXT: movb $1, %cl
; CHECK-NEXT: testb %cl, %cl
-; CHECK-NEXT: jne LBB0_19
-; CHECK-NEXT: ## %bb.14: ## %bb48
-; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=2
-; CHECK-NEXT: jne LBB0_17
-; CHECK-NEXT: ## %bb.15: ## %bb49.preheader
-; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=2
+; CHECK-NEXT: jne LBB0_18
+; CHECK-NEXT: ## %bb.13: ## %bb48
+; CHECK-NEXT: ## in Loop: Header=BB0_12 Depth=2
+; CHECK-NEXT: jne LBB0_16
+; CHECK-NEXT: ## %bb.14: ## %bb49.preheader
+; CHECK-NEXT: ## in Loop: Header=BB0_12 Depth=2
; CHECK-NEXT: xorl %ecx, %ecx
; CHECK-NEXT: movl %esi, %edx
; CHECK-NEXT: movl %edi, %ebx
-; CHECK-NEXT: LBB0_16: ## %bb49
+; CHECK-NEXT: LBB0_15: ## %bb49
; CHECK-NEXT: ## Parent Loop BB0_8 Depth=1
-; CHECK-NEXT: ## Parent Loop BB0_13 Depth=2
+; CHECK-NEXT: ## Parent Loop BB0_12 Depth=2
; CHECK-NEXT: ## => This Inner Loop Header: Depth=3
; CHECK-NEXT: incl %ecx
; CHECK-NEXT: addl $4, %edx
; CHECK-NEXT: decl %ebx
-; CHECK-NEXT: jne LBB0_16
-; CHECK-NEXT: LBB0_17: ## %bb57
-; CHECK-NEXT: ## in Loop: Header=BB0_13 Depth=2
+; CHECK-NEXT: jne LBB0_15
+; CHECK-NEXT: LBB0_16: ## %bb57
+; CHECK-NEXT: ## in Loop: Header=BB0_12 Depth=2
; CHECK-NEXT: decl %eax
-; CHECK-NEXT: jmp LBB0_13
-; CHECK-NEXT: LBB0_19: ## %bb59
+; CHECK-NEXT: jmp LBB0_12
+; CHECK-NEXT: LBB0_18: ## %bb59
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
; CHECK-NEXT: movl $-4, %eax
; CHECK-NEXT: movl %eax, {{[0-9]+}}(%esp)
@@ -109,50 +109,49 @@ define void @f(ptr nocapture %arg, ptr nocapture %arg1, ptr nocapture %arg2, ptr
; CHECK-NEXT: calll ___bzero
; CHECK-NEXT: movb $1, %al
; CHECK-NEXT: testb %al, %al
-; CHECK-NEXT: jne LBB0_22
-; CHECK-NEXT: ## %bb.20: ## %bb61.preheader
+; CHECK-NEXT: jne LBB0_21
+; CHECK-NEXT: ## %bb.19: ## %bb61.preheader
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
; CHECK-NEXT: movl %esi, %eax
; CHECK-NEXT: movl %edi, %ecx
-; CHECK-NEXT: LBB0_21: ## %bb61
+; CHECK-NEXT: LBB0_20: ## %bb61
; CHECK-NEXT: ## Parent Loop BB0_8 Depth=1
; CHECK-NEXT: ## => This Inner Loop Header: Depth=2
; CHECK-NEXT: movl $0, (%eax)
; CHECK-NEXT: addl $4, %eax
; CHECK-NEXT: decl %ecx
-; CHECK-NEXT: jne LBB0_21
-; CHECK-NEXT: LBB0_22: ## %bb67
+; CHECK-NEXT: jne LBB0_20
+; CHECK-NEXT: LBB0_21: ## %bb67
; CHECK-NEXT: ## in Loop: Header=BB0_8 Depth=1
; CHECK-NEXT: decl {{[-0-9]+}}(%e{{[sb]}}p) ## 4-byte Folded Spill
; CHECK-NEXT: jmp LBB0_8
-; CHECK-NEXT: LBB0_18: ## %bb43
-; CHECK-NEXT: Ltmp5:
+; CHECK-NEXT: LBB0_17: ## %bb43
+; CHECK-NEXT: Ltmp5: ## EH_LABEL
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: calll _OnOverFlow
-; CHECK-NEXT: Ltmp6:
+; CHECK-NEXT: Ltmp6: ## EH_LABEL
; CHECK-NEXT: jmp LBB0_3
; CHECK-NEXT: LBB0_2: ## %bb29
-; CHECK-NEXT: Ltmp7:
+; CHECK-NEXT: Ltmp7: ## EH_LABEL
; CHECK-NEXT: movl %esi, %ebx
; CHECK-NEXT: calll _OnOverFlow
-; CHECK-NEXT: Ltmp8:
+; CHECK-NEXT: Ltmp8: ## EH_LABEL
; CHECK-NEXT: LBB0_3: ## %bb30
; CHECK-NEXT: ud2
; CHECK-NEXT: LBB0_4: ## %bb20.loopexit
-; CHECK-NEXT: Ltmp4:
-; CHECK-NEXT: LBB0_9:
-; CHECK-NEXT: movl %esi, %ebx
+; CHECK-NEXT: Ltmp4: ## EH_LABEL
+; CHECK-NEXT: jmp LBB0_6
+; CHECK-NEXT: LBB0_5: ## %bb20.loopexit.split-lp
+; CHECK-NEXT: Ltmp9: ## EH_LABEL
+; CHECK-NEXT: movl %ebx, %esi
; CHECK-NEXT: LBB0_6: ## %bb23
-; CHECK-NEXT: testl %ebx, %ebx
+; CHECK-NEXT: testl %esi, %esi
; CHECK-NEXT: addl $28, %esp
; CHECK-NEXT: popl %esi
; CHECK-NEXT: popl %edi
; CHECK-NEXT: popl %ebx
; CHECK-NEXT: popl %ebp
; CHECK-NEXT: retl
-; CHECK-NEXT: LBB0_5: ## %bb20.loopexit.split-lp
-; CHECK-NEXT: Ltmp9:
-; CHECK-NEXT: jmp LBB0_6
; CHECK-NEXT: Lfunc_end0:
bb:
br i1 undef, label %bb6, label %bb7
diff --git a/llvm/test/CodeGen/X86/bsf.ll b/llvm/test/CodeGen/X86/bsf.ll
index 143e10e..4cceaf1 100644
--- a/llvm/test/CodeGen/X86/bsf.ll
+++ b/llvm/test/CodeGen/X86/bsf.ll
@@ -125,20 +125,17 @@ define i32 @cmov_bsf32(i32 %x, i32 %y) nounwind {
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testl %ecx, %ecx
-; X86-NEXT: je .LBB4_1
-; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: je .LBB4_2
+; X86-NEXT: # %bb.1: # %cond.false
; X86-NEXT: rep bsfl %ecx, %eax
+; X86-NEXT: .LBB4_2: # %cond.end
; X86-NEXT: testl %ecx, %ecx
-; X86-NEXT: jne .LBB4_5
-; X86-NEXT: .LBB4_4:
+; X86-NEXT: jne .LBB4_4
+; X86-NEXT: # %bb.3:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: .LBB4_5: # %cond.end
+; X86-NEXT: .LBB4_4: # %cond.end
; X86-NEXT: retl
-; X86-NEXT: .LBB4_1:
-; X86-NEXT: movl $32, %eax
-; X86-NEXT: testl %ecx, %ecx
-; X86-NEXT: je .LBB4_4
-; X86-NEXT: jmp .LBB4_5
;
; X64-LABEL: cmov_bsf32:
; X64: # %bb.0:
@@ -185,31 +182,28 @@ define i64 @cmov_bsf64(i64 %x, i64 %y) nounwind {
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: movl %esi, %eax
; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: je .LBB6_1
-; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movl $64, %eax
+; X86-NEXT: je .LBB6_4
+; X86-NEXT: # %bb.1: # %cond.false
; X86-NEXT: testl %esi, %esi
-; X86-NEXT: jne .LBB6_3
-; X86-NEXT: # %bb.4: # %cond.false
+; X86-NEXT: jne .LBB6_2
+; X86-NEXT: # %bb.3: # %cond.false
; X86-NEXT: rep bsfl %ecx, %eax
; X86-NEXT: addl $32, %eax
+; X86-NEXT: .LBB6_4: # %cond.end
; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: je .LBB6_6
-; X86-NEXT: jmp .LBB6_7
-; X86-NEXT: .LBB6_1:
-; X86-NEXT: movl $64, %eax
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: jne .LBB6_7
-; X86-NEXT: .LBB6_6: # %cond.end
+; X86-NEXT: jne .LBB6_6
+; X86-NEXT: .LBB6_5: # %cond.end
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-NEXT: .LBB6_7: # %cond.end
+; X86-NEXT: .LBB6_6: # %cond.end
; X86-NEXT: popl %esi
; X86-NEXT: retl
-; X86-NEXT: .LBB6_3:
+; X86-NEXT: .LBB6_2:
; X86-NEXT: rep bsfl %esi, %eax
; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: je .LBB6_6
-; X86-NEXT: jmp .LBB6_7
+; X86-NEXT: je .LBB6_5
+; X86-NEXT: jmp .LBB6_6
;
; X64-LABEL: cmov_bsf64:
; X64: # %bb.0:
diff --git a/llvm/test/CodeGen/X86/bsr.ll b/llvm/test/CodeGen/X86/bsr.ll
index ab0478a..295c7d2a7 100644
--- a/llvm/test/CodeGen/X86/bsr.ll
+++ b/llvm/test/CodeGen/X86/bsr.ll
@@ -7,22 +7,20 @@ define i8 @cmov_bsr8(i8 %x, i8 %y) nounwind {
; X86: # %bb.0:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testb %cl, %cl
-; X86-NEXT: je .LBB0_1
-; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movb $8, %al
+; X86-NEXT: je .LBB0_2
+; X86-NEXT: # %bb.1: # %cond.false
; X86-NEXT: movzbl %cl, %eax
; X86-NEXT: bsrl %eax, %eax
; X86-NEXT: xorl $7, %eax
+; X86-NEXT: .LBB0_2: # %cond.end
; X86-NEXT: testb %cl, %cl
-; X86-NEXT: je .LBB0_4
-; X86-NEXT: .LBB0_5: # %cond.end
+; X86-NEXT: je .LBB0_3
+; X86-NEXT: # %bb.4: # %cond.end
; X86-NEXT: xorb $7, %al
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
-; X86-NEXT: .LBB0_1:
-; X86-NEXT: movb $8, %al
-; X86-NEXT: testb %cl, %cl
-; X86-NEXT: jne .LBB0_5
-; X86-NEXT: .LBB0_4:
+; X86-NEXT: .LBB0_3:
; X86-NEXT: movzbl {{[0-9]+}}(%esp), %eax
; X86-NEXT: # kill: def $al killed $al killed $eax
; X86-NEXT: retl
@@ -78,21 +76,19 @@ define i16 @cmov_bsr16(i16 %x, i16 %y) nounwind {
; X86: # %bb.0:
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: testw %ax, %ax
-; X86-NEXT: je .LBB2_1
-; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movw $16, %cx
+; X86-NEXT: je .LBB2_2
+; X86-NEXT: # %bb.1: # %cond.false
; X86-NEXT: bsrw %ax, %cx
; X86-NEXT: xorl $15, %ecx
+; X86-NEXT: .LBB2_2: # %cond.end
; X86-NEXT: testw %ax, %ax
-; X86-NEXT: jne .LBB2_4
-; X86-NEXT: .LBB2_5: # %cond.end
+; X86-NEXT: jne .LBB2_3
+; X86-NEXT: # %bb.4: # %cond.end
; X86-NEXT: movzwl {{[0-9]+}}(%esp), %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
; X86-NEXT: retl
-; X86-NEXT: .LBB2_1:
-; X86-NEXT: movw $16, %cx
-; X86-NEXT: testw %ax, %ax
-; X86-NEXT: je .LBB2_5
-; X86-NEXT: .LBB2_4:
+; X86-NEXT: .LBB2_3:
; X86-NEXT: movzwl %cx, %eax
; X86-NEXT: xorl $15, %eax
; X86-NEXT: # kill: def $ax killed $ax killed $eax
@@ -143,20 +139,18 @@ define i32 @cmov_bsr32(i32 %x, i32 %y) nounwind {
; X86: # %bb.0:
; X86-NEXT: movl {{[0-9]+}}(%esp), %ecx
; X86-NEXT: testl %ecx, %ecx
-; X86-NEXT: je .LBB4_1
-; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movl $32, %eax
+; X86-NEXT: je .LBB4_2
+; X86-NEXT: # %bb.1: # %cond.false
; X86-NEXT: bsrl %ecx, %eax
; X86-NEXT: xorl $31, %eax
+; X86-NEXT: .LBB4_2: # %cond.end
; X86-NEXT: testl %ecx, %ecx
-; X86-NEXT: je .LBB4_4
-; X86-NEXT: .LBB4_5: # %cond.end
+; X86-NEXT: je .LBB4_3
+; X86-NEXT: # %bb.4: # %cond.end
; X86-NEXT: xorl $31, %eax
; X86-NEXT: retl
-; X86-NEXT: .LBB4_1:
-; X86-NEXT: movl $32, %eax
-; X86-NEXT: testl %ecx, %ecx
-; X86-NEXT: jne .LBB4_5
-; X86-NEXT: .LBB4_4:
+; X86-NEXT: .LBB4_3:
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: retl
;
@@ -206,32 +200,29 @@ define i64 @cmov_bsr64(i64 %x, i64 %y) nounwind {
; X86-NEXT: xorl %edx, %edx
; X86-NEXT: movl %esi, %eax
; X86-NEXT: orl %ecx, %eax
-; X86-NEXT: je .LBB6_1
-; X86-NEXT: # %bb.2: # %cond.false
+; X86-NEXT: movl $64, %eax
+; X86-NEXT: je .LBB6_4
+; X86-NEXT: # %bb.1: # %cond.false
; X86-NEXT: testl %ecx, %ecx
-; X86-NEXT: jne .LBB6_3
-; X86-NEXT: # %bb.4: # %cond.false
+; X86-NEXT: jne .LBB6_2
+; X86-NEXT: # %bb.3: # %cond.false
; X86-NEXT: bsrl %esi, %eax
; X86-NEXT: xorl $31, %eax
; X86-NEXT: orl $32, %eax
+; X86-NEXT: .LBB6_4: # %cond.end
; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: je .LBB6_7
-; X86-NEXT: jmp .LBB6_6
-; X86-NEXT: .LBB6_1:
-; X86-NEXT: movl $64, %eax
-; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: jne .LBB6_6
-; X86-NEXT: .LBB6_7: # %cond.end
+; X86-NEXT: jne .LBB6_5
+; X86-NEXT: .LBB6_6: # %cond.end
; X86-NEXT: movl {{[0-9]+}}(%esp), %edx
; X86-NEXT: movl {{[0-9]+}}(%esp), %eax
; X86-NEXT: popl %esi
; X86-NEXT: retl
-; X86-NEXT: .LBB6_3:
+; X86-NEXT: .LBB6_2:
; X86-NEXT: bsrl %ecx, %eax
; X86-NEXT: xorl $31, %eax
; X86-NEXT: orl %ecx, %esi
-; X86-NEXT: je .LBB6_7
-; X86-NEXT: .LBB6_6:
+; X86-NEXT: je .LBB6_6
+; X86-NEXT: .LBB6_5:
; X86-NEXT: xorl $63, %eax
; X86-NEXT: popl %esi
; X86-NEXT: retl
diff --git a/llvm/test/CodeGen/X86/switch-phi-const.ll b/llvm/test/CodeGen/X86/switch-phi-const.ll
index dba7666..35aa275 100644
--- a/llvm/test/CodeGen/X86/switch-phi-const.ll
+++ b/llvm/test/CodeGen/X86/switch-phi-const.ll
@@ -7,37 +7,35 @@ define void @switch_phi_const(i32 %x) {
; CHECK-LABEL: switch_phi_const:
; CHECK: # %bb.0: # %bb0
; CHECK-NEXT: # kill: def $edi killed $edi def $rdi
-; CHECK-NEXT: leal -1(%rdi), %eax
-; CHECK-NEXT: cmpl $54, %eax
-; CHECK-NEXT: ja .LBB0_9
+; CHECK-NEXT: leal -1(%rdi), %ecx
+; CHECK-NEXT: cmpl $54, %ecx
+; CHECK-NEXT: ja .LBB0_8
; CHECK-NEXT: # %bb.1: # %bb0
-; CHECK-NEXT: jmpq *.LJTI0_0(,%rax,8)
-; CHECK-NEXT: .LBB0_3: # %case_7
+; CHECK-NEXT: movl $42, %eax
+; CHECK-NEXT: jmpq *.LJTI0_0(,%rcx,8)
+; CHECK-NEXT: .LBB0_2: # %case_7
; CHECK-NEXT: movq g@GOTPCREL(%rip), %rax
; CHECK-NEXT: movl (%rax), %edi
; CHECK-NEXT: movq effect@GOTPCREL(%rip), %rax
; CHECK-NEXT: movl $7, (%rax)
-; CHECK-NEXT: .LBB0_4: # %case_1_loop
+; CHECK-NEXT: .LBB0_3: # %case_1_loop
; CHECK-NEXT: movq effect@GOTPCREL(%rip), %rax
; CHECK-NEXT: movl $1, (%rax)
-; CHECK-NEXT: .LBB0_5: # %case_5
+; CHECK-NEXT: .LBB0_4: # %case_5
; CHECK-NEXT: movq effect@GOTPCREL(%rip), %rax
; CHECK-NEXT: movl $5, (%rax)
-; CHECK-NEXT: .LBB0_6: # %case_13
+; CHECK-NEXT: .LBB0_5: # %case_13
; CHECK-NEXT: movq effect@GOTPCREL(%rip), %rax
; CHECK-NEXT: movl $13, (%rax)
-; CHECK-NEXT: .LBB0_7: # %case_42
+; CHECK-NEXT: .LBB0_6: # %case_42
; CHECK-NEXT: movq effect@GOTPCREL(%rip), %rax
; CHECK-NEXT: movl %edi, (%rax)
; CHECK-NEXT: movl $55, %eax
-; CHECK-NEXT: .LBB0_8: # %case_55
+; CHECK-NEXT: .LBB0_7: # %case_55
; CHECK-NEXT: movq effect@GOTPCREL(%rip), %rcx
; CHECK-NEXT: movl %eax, (%rcx)
-; CHECK-NEXT: .LBB0_9: # %default
+; CHECK-NEXT: .LBB0_8: # %default
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB0_2:
-; CHECK-NEXT: movl $42, %eax
-; CHECK-NEXT: jmp .LBB0_8
bb0:
switch i32 %x, label %default [
i32 1, label %case_1_loop
@@ -94,38 +92,36 @@ define void @switch_trunc_phi_const(i32 %x) {
; CHECK-LABEL: switch_trunc_phi_const:
; CHECK: # %bb.0: # %bb0
; CHECK-NEXT: movzbl %dil, %eax
-; CHECK-NEXT: leal -1(%rax), %ecx
-; CHECK-NEXT: cmpl $54, %ecx
-; CHECK-NEXT: ja .LBB1_9
+; CHECK-NEXT: leal -1(%rax), %edx
+; CHECK-NEXT: cmpl $54, %edx
+; CHECK-NEXT: ja .LBB1_8
; CHECK-NEXT: # %bb.1: # %bb0
-; CHECK-NEXT: jmpq *.LJTI1_0(,%rcx,8)
-; CHECK-NEXT: .LBB1_2:
-; CHECK-NEXT: movl $3895, %eax # imm = 0xF37
-; CHECK-NEXT: jmp .LBB1_7
-; CHECK-NEXT: .LBB1_9: # %default
+; CHECK-NEXT: movl $3895, %ecx # imm = 0xF37
+; CHECK-NEXT: jmpq *.LJTI1_0(,%rdx,8)
+; CHECK-NEXT: .LBB1_8: # %default
; CHECK-NEXT: retq
-; CHECK-NEXT: .LBB1_3: # %case_1_loop
+; CHECK-NEXT: .LBB1_2: # %case_1_loop
; CHECK-NEXT: movq effect64@GOTPCREL(%rip), %rcx
; CHECK-NEXT: movq $1, (%rcx)
-; CHECK-NEXT: .LBB1_4: # %case_5
+; CHECK-NEXT: .LBB1_3: # %case_5
; CHECK-NEXT: movq effect64@GOTPCREL(%rip), %rcx
; CHECK-NEXT: movq $5, (%rcx)
-; CHECK-NEXT: .LBB1_5: # %case_13
+; CHECK-NEXT: .LBB1_4: # %case_13
; CHECK-NEXT: movq effect64@GOTPCREL(%rip), %rcx
; CHECK-NEXT: movq $13, (%rcx)
-; CHECK-NEXT: .LBB1_6: # %case_42
-; CHECK-NEXT: movq effect64@GOTPCREL(%rip), %rcx
-; CHECK-NEXT: movq %rax, (%rcx)
-; CHECK-NEXT: movl $55, %eax
-; CHECK-NEXT: .LBB1_7: # %case_55
+; CHECK-NEXT: .LBB1_5: # %case_42
; CHECK-NEXT: movq effect64@GOTPCREL(%rip), %rcx
; CHECK-NEXT: movq %rax, (%rcx)
-; CHECK-NEXT: .LBB1_8: # %case_7
+; CHECK-NEXT: movl $55, %ecx
+; CHECK-NEXT: .LBB1_6: # %case_55
+; CHECK-NEXT: movq effect64@GOTPCREL(%rip), %rax
+; CHECK-NEXT: movq %rcx, (%rax)
+; CHECK-NEXT: .LBB1_7: # %case_7
; CHECK-NEXT: movq g64@GOTPCREL(%rip), %rax
; CHECK-NEXT: movq (%rax), %rax
; CHECK-NEXT: movq effect64@GOTPCREL(%rip), %rcx
; CHECK-NEXT: movq $7, (%rcx)
-; CHECK-NEXT: jmp .LBB1_3
+; CHECK-NEXT: jmp .LBB1_2
bb0:
%x_trunc = trunc i32 %x to i8
switch i8 %x_trunc, label %default [